diff -pruN 1.4.0-1/README.rst 1.5.0-1/README.rst
--- 1.4.0-1/README.rst	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/README.rst	2025-06-18 05:27:42.000000000 +0000
@@ -17,6 +17,20 @@ Handles multi-API versions of Azure Stor
 
 Change Log
 ----------
+1.5.0
+++++++
+* packaging: Remove unused import, clean up build pipeline
+* storage, cosmosdb:
+    - Remove all track1 SDKs
+* blob:
+    - Keep only v2021-08-06, v2022-11-02
+* fileshare:
+    - Keep only v2025-05-05
+* filedatalake:
+    - Keep only v2021-08-06
+* queue:
+    - Keep only v2018-03-28
+
 1.4.0
 ++++++
 * fileshare: Support v2025-05-05(12.21.0) and remove v2024-08-04
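Editor's note: for consumers pinning against the trimmed multi-API surface, a minimal sketch of version-pinned imports follows. The module path and client name are assumptions inferred from this changelog and the layout of the sibling azure-multiapi-storage package, not verified against the released wheel:

    # Pin an explicit blob API version by importing from the versioned
    # subpackage (path and class name are assumptions, see note above).
    from azure.multiapi.storagev2.blob.v2022_11_02 import BlobServiceClient

    client = BlobServiceClient(
        account_url='https://myaccount.blob.core.windows.net',
        credential='<account-key-or-sas>',
    )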
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/__init__.py 1.5.0-1/azure/multiapi/cosmosdb/__init__.py
--- 1.4.0-1/azure/multiapi/cosmosdb/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-﻿__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/__init__.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/__init__.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
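Editor's note: the two deleted `__init__.py` bodies above are the two classic pre-PEP-420 namespace-package idioms, no longer needed once the track1 tree is gone. For reference, the stand-alone forms (each belongs at the top of a package `__init__.py`):

    # pkgutil style: extend __path__ across all matching directories on sys.path
    __path__ = __import__('pkgutil').extend_path(__path__, __name__)

    # setuptools style: register the package as a namespace package
    __import__('pkg_resources').declare_namespace(__name__)

    # On Python 3.3+ a directory without __init__.py is already an implicit
    # namespace package (PEP 420), which is why these files can simply be dropped.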
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/__init__.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/__init__.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,44 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    X_MS_VERSION,
-)
-from .cloudstorageaccount import CloudStorageAccount
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-    ServiceStats,
-    GeoReplication,
-    LocationMode,
-    RetryContext,
-)
-from .retry import (
-    ExponentialRetry,
-    LinearRetry,
-    no_retry,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
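Editor's note: the deleted `common/__init__.py` re-exported the track1 convenience surface. A sketch of the consumer-side imports this removal breaks, shown only to document what disappears; names are taken from the hunk above and the call is illustrative:

    # Track1-style usage removed in 1.5.0. Keyword arguments mirror the
    # deleted code; this no longer imports against 1.5.0.
    from azure.multiapi.cosmosdb.v2017_04_17.common import (
        CloudStorageAccount,
        ExponentialRetry,
    )

    account = CloudStorageAccount(account_name='myaccount', account_key='<key>')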
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_auth.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_auth.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_auth.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_auth.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,130 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ._common_conversion import (
-    _sign_string,
-)
-
-import logging
-logger = logging.getLogger(__name__)
-
-
-class _StorageSharedKeyAuthentication(object):
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = request.path.split('?')[0]
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        signature = _sign_string(self.account_key, string_to_sign)
-        auth_string = 'SharedKey ' + self.account_name + ':' + signature
-        request.headers['Authorization'] = auth_string
-
-
-class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        logger.debug("String_to_sign=%s", string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value:
-                string_to_sign += '\n' + name.lower() + ':' + value
-
-        return string_to_sign
-
-
-class _StorageTableSharedKeyAuthentication(_StorageSharedKeyAuthentication):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                ['content-md5', 'content-type', 'x-ms-date'],
-            ) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        logger.debug("String_to_sign=%s", string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        for name, value in request.query.items():
-            if name == 'comp':
-                return '?comp=' + value
-        return ''
-
-
-class _StorageNoAuthentication(object):
-    def sign_request(self, request):
-        pass
-
-
-class _StorageSASAuthentication(object):
-    def __init__(self, sas_token):
-        self.sas_token = sas_token
-
-    def sign_request(self, request):
-        # if 'sig=' is present, then the request has already been signed
-        # as is the case when performing retries
-        if 'sig=' in request.path:
-            return
-        if '?' in request.path:
-            request.path += '&'
-        else:
-            request.path += '?'
-
-        request.path += self.sas_token
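Editor's note: the second class in this hunk reuses the name `_StorageSharedKeyAuthentication`, shadowing the base class it inherits from; that is legal Python, and the table variant overrides `sign_request` anyway. As a stand-alone illustration of the signing step, a minimal sketch using only the standard library (the string-to-sign is abbreviated; real requests canonicalize headers and query exactly as the hunk shows):

    import base64
    import hashlib
    import hmac

    def sign_string(account_key_b64, string_to_sign):
        # HMAC-SHA256 over the canonical string, keyed by the decoded
        # account key, then base64-encoded -- mirrors _sign_string above.
        key = base64.b64decode(account_key_b64)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Throwaway demonstration values (table-style: verb, content-md5,
    # content-type, x-ms-date, canonicalized resource).
    string_to_sign = ('GET\n\napplication/json\n'
                      'Mon, 17 Mar 2025 06:32:34 GMT\n'
                      '/myaccount/mytable')
    auth_header = 'SharedKey myaccount:' + sign_string('bXlrZXk=', string_to_sign)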
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_common_conversion.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_common_conversion.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_common_conversion.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_common_conversion.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,135 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-import sys
-from io import (SEEK_SET)
-
-from dateutil.tz import tzutc
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
-)
-from .models import (
-    _unicode_type,
-)
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-def _int_to_str(value):
-    return str(int(value)) if value is not None else None
-
-
-def _bool_to_str(value):
-    if value is None:
-        return None
-
-    if isinstance(value, bool):
-        if value:
-            return 'true'
-        else:
-            return 'false'
-
-    return str(value)
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _datetime_to_utc_string(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value is None:
-        return None
-
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-
-    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-
-def _encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def _decode_base64_to_bytes(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def _decode_base64_to_text(data):
-    decoded_bytes = _decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def _sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = _decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, _unicode_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, _unicode_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = _encode_base64(digest)
-    return encoded_digest
-
-
-def _get_content_md5(data):
-    md5 = hashlib.md5()
-    if isinstance(data, bytes):
-        md5.update(data)
-    elif hasattr(data, 'read'):
-        pos = 0
-        try:
-            pos = data.tell()
-        except:
-            pass
-        for chunk in iter(lambda: data.read(4096), b""):
-            md5.update(chunk)
-        try:
-            data.seek(pos, SEEK_SET)
-        except (AttributeError, IOError):
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
-    else:
-        raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
-
-    return base64.b64encode(md5.digest()).decode('utf-8')
-
-
-def _lower(text):
-    return text.lower()
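Editor's note: a stand-alone sketch of the Content-MD5 helper deleted above, hashing either a bytes payload or a seekable stream and restoring the stream position afterwards:

    import base64
    import hashlib
    from io import BytesIO, SEEK_SET

    def content_md5(data):
        md5 = hashlib.md5()
        if isinstance(data, bytes):
            md5.update(data)
        else:  # assume a seekable, readable stream, as the original validates
            pos = data.tell()
            for chunk in iter(lambda: data.read(4096), b""):
                md5.update(chunk)
            data.seek(pos, SEEK_SET)  # rewind so the caller can still read
        return base64.b64encode(md5.digest()).decode('utf-8')

    assert content_md5(b"abc") == content_md5(BytesIO(b"abc"))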
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_connection.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_connection.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_connection.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_connection.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,167 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from urllib.parse import urlparse
-else:
-    from urlparse import urlparse
-
-from ._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_KEY,
-    DEV_BLOB_HOST,
-    DEV_QUEUE_HOST,
-    DEV_TABLE_HOST
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-_EMULATOR_ENDPOINTS = {
-    'blob': DEV_BLOB_HOST,
-    'queue': DEV_QUEUE_HOST,
-    'table': DEV_TABLE_HOST,
-    'file': '',
-}
-
-_CONNECTION_ENDPOINTS = {
-    'blob': 'BlobEndpoint',
-    'queue': 'QueueEndpoint',
-    'table': 'TableEndpoint',
-    'file': 'FileEndpoint',
-}
-
-_CONNECTION_ENDPOINTS_SECONDARY = {
-    'blob': 'BlobSecondaryEndpoint',
-    'queue': 'QueueSecondaryEndpoint',
-    'table': 'TableSecondaryEndpoint',
-    'file': 'FileSecondaryEndpoint',
-}
-
-class _ServiceParameters(object):
-    def __init__(self, service, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 custom_domain=None, custom_domain_secondary=None):
-
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.protocol = protocol or DEFAULT_PROTOCOL
-        self.is_emulated = is_emulated
-
-        if is_emulated:
-            self.account_name = DEV_ACCOUNT_NAME
-            self.protocol = 'http'
-
-            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
-            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
-
-            self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-            self.secondary_endpoint = '{}/{}-secondary'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-        else:
-            # Strip whitespace from the key
-            if self.account_key:
-                self.account_key = self.account_key.strip()
-
-            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
-
-            # Setup the primary endpoint
-            if custom_domain:
-                parsed_url = urlparse(custom_domain)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.primary_endpoint = parsed_url.netloc + path
-                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
-            else:
-                if not self.account_name:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
-
-            # Setup the secondary endpoint
-            if custom_domain_secondary:
-                if not custom_domain:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)   
-
-                parsed_url = urlparse(custom_domain_secondary)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.secondary_endpoint = parsed_url.netloc + path
-            else:
-                if self.account_name:
-                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
-                else:
-                    self.secondary_endpoint = None
-
-    @staticmethod
-    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, is_emulated=None,
-                               protocol=None, endpoint_suffix=None, custom_domain=None, request_session=None,
-                               connection_string=None, socket_timeout=None):
-        if connection_string:
-            params = _ServiceParameters._from_connection_string(connection_string, service)
-        elif is_emulated:
-            params = _ServiceParameters(service, is_emulated=True)
-        elif account_name:
-            params = _ServiceParameters(service,
-                                        account_name=account_name,
-                                        account_key=account_key,
-                                        sas_token=sas_token,
-                                        is_emulated=is_emulated,
-                                        protocol=protocol,
-                                        endpoint_suffix=endpoint_suffix,
-                                        custom_domain=custom_domain)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        params.request_session = request_session
-        params.socket_timeout = socket_timeout
-        return params
-
-    @staticmethod
-    def _from_connection_string(connection_string, service):
-        # Split into key=value pairs removing empties, then split the pairs into a dict
-        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
-
-        # Authentication
-        account_name = config.get('AccountName')
-        account_key = config.get('AccountKey')
-        sas_token = config.get('SharedAccessSignature')
-
-        # Emulator
-        is_emulated = config.get('UseDevelopmentStorage')
-
-        # Basic URL Configuration
-        protocol = config.get('DefaultEndpointsProtocol')
-        endpoint_suffix = config.get('EndpointSuffix')
-
-        # Custom URLs
-        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
-        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
-
-        return _ServiceParameters(service,
-                                  account_name=account_name,
-                                  account_key=account_key,
-                                  sas_token=sas_token,
-                                  is_emulated=is_emulated,
-                                  protocol=protocol,
-                                  endpoint_suffix=endpoint_suffix,
-                                  custom_domain=endpoint,
-                                  custom_domain_secondary=endpoint_secondary)
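Editor's note: the connection-string handling deleted above reduces to two splits; note that `split('=', 1)` keeps the `=` padding inside base64 account keys intact. A runnable sketch with throwaway values:

    # Split on ';' into pairs, then on the first '=' only.
    conn_str = (
        'DefaultEndpointsProtocol=https;AccountName=myaccount;'
        'AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net'
    )
    config = dict(s.split('=', 1) for s in conn_str.split(';') if s)

    # Primary endpoint assembly as in _ServiceParameters above.
    endpoint = '{}.{}.{}'.format(
        config['AccountName'], 'table',
        config.get('EndpointSuffix', 'core.windows.net'))
    # -> 'myaccount.table.core.windows.net'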
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_constants.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_constants.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,45 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import platform
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.37.1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-04-17'
-
-# UserAgent string sample: 'Azure-CosmosDB/0.32.0 (Python CPython 3.4.2; Windows 8)'
-USER_AGENT_STRING = 'Azure-CosmosDB/{} (Python {} {}; {} {})'.format(__version__, platform.python_implementation(),
-                                                                    platform.python_version(), platform.system(),
-                                                                    platform.release())
-
-# Live ServiceClient URLs
-SERVICE_HOST_BASE = 'core.windows.net'
-DEFAULT_PROTOCOL = 'https'
-
-# Development ServiceClient URLs
-DEV_BLOB_HOST = '127.0.0.1:10000'
-DEV_QUEUE_HOST = '127.0.0.1:10001'
-DEV_TABLE_HOST = '127.0.0.1:10002'
-
-# Default credentials for Development Storage Service
-DEV_ACCOUNT_NAME = 'devstoreaccount1'
-DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
-
-# Socket timeout in seconds
-DEFAULT_SOCKET_TIMEOUT = 20
-
-# Encryption constants
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
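Editor's note: a quick check of how the USER_AGENT_STRING above expands at runtime, using only the platform module:

    import platform

    # Expands to e.g. 'Azure-CosmosDB/0.37.1 (Python CPython 3.11.4; Linux 6.1.0)'
    # depending on the interpreter and OS.
    ua = 'Azure-CosmosDB/0.37.1 (Python {} {}; {} {})'.format(
        platform.python_implementation(), platform.python_version(),
        platform.system(), platform.release())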
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_deserialization.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_deserialization.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,353 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-from ._common_conversion import _to_str
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    ServiceProperties,
-    Logging,
-    Metrics,
-    CorsRule,
-    AccessPolicy,
-    _dict,
-    GeoReplication,
-    ServiceStats,
-)
-
-
-def _int_to_str(value):
-    return value if value is None else int(value)
-
-
-def _bool(value):
-    return value.lower() == 'true'
-
-
-def _to_upper_str(value):
-    return _to_str(value).upper() if value is not None else None
-
-
-def _get_download_size(start_range, end_range, resource_size):
-    if start_range is not None:
-        end_range = end_range if end_range else (resource_size if resource_size else None)
-        if end_range is not None:
-            return end_range - start_range
-        else:
-            return None
-    else:
-        return resource_size
-
-
-GET_PROPERTIES_ATTRIBUTE_MAP = {
-    'last-modified': (None, 'last_modified', parser.parse),
-    'etag': (None, 'etag', _to_str),
-    'x-ms-blob-type': (None, 'blob_type', _to_str),
-    'content-length': (None, 'content_length', _int_to_str),
-    'content-range': (None, 'content_range', _to_str),
-    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _int_to_str),
-    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _int_to_str),
-    'x-ms-access-tier': (None, 'blob_tier', _to_str),
-    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
-    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
-    'x-ms-share-quota': (None, 'quota', _int_to_str),
-    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
-    'content-type': ('content_settings', 'content_type', _to_str),
-    'cache-control': ('content_settings', 'cache_control', _to_str),
-    'content-encoding': ('content_settings', 'content_encoding', _to_str),
-    'content-disposition': ('content_settings', 'content_disposition', _to_str),
-    'content-language': ('content_settings', 'content_language', _to_str),
-    'content-md5': ('content_settings', 'content_md5', _to_str),
-    'x-ms-lease-status': ('lease', 'status', _to_str),
-    'x-ms-lease-state': ('lease', 'state', _to_str),
-    'x-ms-lease-duration': ('lease', 'duration', _to_str),
-    'x-ms-copy-id': ('copy', 'id', _to_str),
-    'x-ms-copy-source': ('copy', 'source', _to_str),
-    'x-ms-copy-status': ('copy', 'status', _to_str),
-    'x-ms-copy-progress': ('copy', 'progress', _to_str),
-    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
-    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
-    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
-}
-
-
-def _parse_metadata(response):
-    '''
-    Extracts out resource metadata information.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    metadata = _dict()
-    for key, value in response.headers.items():
-        if key.startswith('x-ms-meta-'):
-            metadata[key[10:]] = _to_str(value)
-
-    return metadata
-
-
-def _parse_properties(response, result_class):
-    '''
-    Extracts out resource properties and metadata information.
-    Ignores the standard http headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    props = result_class()
-    for key, value in response.headers.items():
-        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
-        if info:
-            if info[0] is None:
-                setattr(props, info[1], info[2](value))
-            else:
-                attr = getattr(props, info[0])
-                setattr(attr, info[1], info[2](value))
-
-    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
-        props.blob_tier = _to_upper_str(props.blob_tier)
-    return props
-
-
-def _parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def _convert_xml_to_signed_identifiers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <SignedIdentifiers>
-      <SignedIdentifier>
-        <Id>unique-value</Id>
-        <AccessPolicy>
-          <Start>start-time</Start>
-          <Expiry>expiry-time</Expiry>
-          <Permission>abbreviated-permission-list</Permission>
-        </AccessPolicy>
-      </SignedIdentifier>
-    </SignedIdentifiers>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    list_element = ETree.fromstring(response.body)
-    signed_identifiers = _dict()
-
-    for signed_identifier_element in list_element.findall('SignedIdentifier'):
-        # Id element
-        id = signed_identifier_element.find('Id').text
-
-        # Access policy element
-        access_policy = AccessPolicy()
-        access_policy_element = signed_identifier_element.find('AccessPolicy')
-        if access_policy_element is not None:
-            start_element = access_policy_element.find('Start')
-            if start_element is not None:
-                access_policy.start = parser.parse(start_element.text)
-
-            expiry_element = access_policy_element.find('Expiry')
-            if expiry_element is not None:
-                access_policy.expiry = parser.parse(expiry_element.text)
-
-            access_policy.permission = access_policy_element.findtext('Permission')
-
-        signed_identifiers[id] = access_policy
-
-    return signed_identifiers
-
-
-def _convert_xml_to_service_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceStats>
-      <GeoReplication>      
-          <Status>live|bootstrap|unavailable</Status>
-          <LastSyncTime>sync-time|<empty></LastSyncTime>
-      </GeoReplication>
-    </StorageServiceStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_stats_element = ETree.fromstring(response.body)
-
-    geo_replication_element = service_stats_element.find('GeoReplication')
-
-    geo_replication = GeoReplication()
-    geo_replication.status = geo_replication_element.find('Status').text
-    last_sync_time = geo_replication_element.find('LastSyncTime').text
-    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None
-
-    service_stats = ServiceStats()
-    service_stats.geo_replication = geo_replication
-    return service_stats
-
-
-def _convert_xml_to_service_properties(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_properties_element = ETree.fromstring(response.body)
-    service_properties = ServiceProperties()
-
-    # Logging
-    logging = service_properties_element.find('Logging')
-    if logging is not None:
-        service_properties.logging = Logging()
-        service_properties.logging.version = logging.find('Version').text
-        service_properties.logging.delete = _bool(logging.find('Delete').text)
-        service_properties.logging.read = _bool(logging.find('Read').text)
-        service_properties.logging.write = _bool(logging.find('Write').text)
-
-        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
-                                         service_properties.logging.retention_policy)
-    # HourMetrics
-    hour_metrics_element = service_properties_element.find('HourMetrics')
-    if hour_metrics_element is not None:
-        service_properties.hour_metrics = Metrics()
-        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
-
-    # MinuteMetrics
-    minute_metrics_element = service_properties_element.find('MinuteMetrics')
-    if minute_metrics_element is not None:
-        service_properties.minute_metrics = Metrics()
-        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
-
-    # CORS
-    cors = service_properties_element.find('Cors')
-    if cors is not None:
-        service_properties.cors = list()
-        for rule in cors.findall('CorsRule'):
-            allowed_origins = rule.find('AllowedOrigins').text.split(',')
-
-            allowed_methods = rule.find('AllowedMethods').text.split(',')
-
-            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
-
-            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
-
-            exposed_headers = rule.find('ExposedHeaders').text
-            if exposed_headers is not None:
-                cors_rule.exposed_headers = exposed_headers.split(',')
-
-            allowed_headers = rule.find('AllowedHeaders').text
-            if allowed_headers is not None:
-                cors_rule.allowed_headers = allowed_headers.split(',')
-
-            service_properties.cors.append(cors_rule)
-
-    # Target version
-    target_version = service_properties_element.find('DefaultServiceVersion')
-    if target_version is not None:
-        service_properties.target_version = target_version.text
-
-    return service_properties
-
-
-def _convert_xml_to_metrics(xml, metrics):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    metrics.version = xml.find('Version').text
-
-    # Enabled
-    metrics.enabled = _bool(xml.find('Enabled').text)
-
-    # IncludeAPIs
-    include_apis_element = xml.find('IncludeAPIs')
-    if include_apis_element is not None:
-        metrics.include_apis = _bool(include_apis_element.text)
-
-    # RetentionPolicy
-    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
-
-
-def _convert_xml_to_retention_policy(xml, retention_policy):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    retention_policy.enabled = _bool(xml.find('Enabled').text)
-
-    # Days
-    days_element = xml.find('Days')
-    if days_element is not None:
-        retention_policy.days = int(days_element.text)
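Editor's note: two stand-alone checks of the parsing logic deleted above, using only the standard library:

    # Content-range parsing: 'bytes 1-3/65537' -> total resource length 65537.
    content_range = 'bytes 1-3/65537'
    length = int(content_range.split(' ', 1)[1].split('/', 1)[1])
    assert length == 65537

    # Minimal ElementTree walk mirroring _convert_xml_to_service_stats.
    from xml.etree import ElementTree as ETree

    body = (b'<?xml version="1.0" encoding="utf-8"?>'
            b'<StorageServiceStats><GeoReplication>'
            b'<Status>live</Status><LastSyncTime></LastSyncTime>'
            b'</GeoReplication></StorageServiceStats>')
    geo = ETree.fromstring(body).find('GeoReplication')
    status = geo.find('Status').text           # 'live'
    last_sync = geo.find('LastSyncTime').text  # None when the element is empty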
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_encryption.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_encryption.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,242 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-
-from ._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-    __version__,
-)
-from ._error import (
-    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
-    _validate_not_none,
-    _validate_encryption_protocol_version,
-    _validate_key_encryption_key_unwrap,
-    _validate_kek_id,
-)
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier, 
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-    
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    except KeyError:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        (i.e. TableService) instance variables for more details.
-    :param func key_resolver:
-        A function that, given a key_id, will return a key_encryption_key. Please refer 
-        to high-level service object (i.e. TableService) instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_unwrap(key_encryption_key)
-    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())
-
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
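Editor's note: a hedged sketch of the `_generate_AES_CBC_cipher` helper above in use, written against the `cryptography` package's public API (verify against the version actually pinned); keys and IVs are throwaway test values:

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC

    cek = os.urandom(32)  # 256-bit content encryption key
    iv = os.urandom(16)   # AES block-size initialization vector
    cipher = Cipher(AES(cek), CBC(iv), default_backend())

    # CBC needs block-aligned input; 16 bytes exactly, so no padding here.
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(b'0123456789abcdef') + encryptor.finalize()

    decryptor = cipher.decryptor()
    assert decryptor.update(ciphertext) + decryptor.finalize() == b'0123456789abcdef'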
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_error.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_error.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,186 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from sys import version_info
-
-if version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-from azure.common import (
-    AzureHttpError,
-    AzureConflictHttpError,
-    AzureMissingResourceHttpError,
-    AzureException,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-)
-
-_ERROR_CONFLICT = 'Conflict ({0})'
-_ERROR_NOT_FOUND = 'Not found ({0})'
-_ERROR_UNKNOWN = 'Unknown error ({0})'
-_ERROR_STORAGE_MISSING_INFO = \
-    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
-_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
-    'The emulator does not support the file service.'
-_ERROR_ACCESS_POLICY = \
-    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
-    'instance'
-_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
-_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
-_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
-_ERROR_VALUE_NONE = '{0} should not be None.'
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
-_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use {0} chunk downloader more than 1 thread must be ' + \
-    'used since get_{0}_to_bytes should be called for single threaded ' + \
-    '{0} downloads.'
-_ERROR_START_END_NEEDED_FOR_MD5 = \
-    'Both end_range and start_range need to be specified ' + \
-    'for getting content MD5.'
-_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
-    'Getting content MD5 for a range greater than 4MB ' + \
-    'is not supported.'
-_ERROR_MD5_MISMATCH = \
-    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
-_ERROR_TOO_MANY_ACCESS_POLICIES = \
-    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
-    'Encryption version is not supported.'
-_ERROR_DECRYPTION_FAILURE = \
-    'Decryption failed'
-_ERROR_ENCRYPTION_REQUIRED = \
-    'Encryption required but no key was provided.'
-_ERROR_DECRYPTION_REQUIRED = \
-    'Decryption required but neither key nor resolver was provided.' + \
-    ' If you do not want to decrypt, please do not set the require encryption flag.'
-_ERROR_INVALID_KID = \
-    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
-_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
-    'Specified encryption algorithm is not supported.'
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
-                                           ' for this method.'
-_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
-_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
-                            'Data was either not encrypted or metadata has been lost.'
-
-
-def _dont_fail_on_exist(error):
-    ''' don't throw exception if the resource exists.
-    This is called by create_* APIs with fail_on_exist=False'''
-    if isinstance(error, AzureConflictHttpError):
-        return False
-    else:
-        raise error
-
-
-def _dont_fail_not_exist(error):
-    ''' don't throw exception if the resource doesn't exist.
-    This is called by create_* APIs with fail_on_exist=False'''
-    if isinstance(error, AzureMissingResourceHttpError):
-        return False
-    else:
-        raise error
-
-
-def _http_error_handler(http_error):
-    ''' Simple error handler for azure.'''
-    message = str(http_error)
-    if http_error.respbody is not None:
-        message += '\n' + http_error.respbody.decode('utf-8-sig')
-    raise AzureHttpError(message, http_error.status)
-
-
-def _validate_type_bytes(param_name, param):
-    if not isinstance(param, bytes):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _validate_type_bytes_or_stream(param_name, param):
-    if not (isinstance(param, bytes) or hasattr(param, 'read')):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
-
-
-def _validate_content_match(server_md5, computed_md5):
-    if server_md5 != computed_md5:
-        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
-
-
-def _validate_access_policies(identifiers):
-    if identifiers and len(identifiers) > 5:
-        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-def _validate_key_encryption_key_unwrap(kek):
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-
-
-def _validate_encryption_required(require_encryption, kek):
-    if require_encryption and (kek is None):
-        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
-
-
-def _validate_decryption_required(require_encryption, kek, resolver):
-    if (require_encryption and (kek is None) and
-            (resolver is None)):
-        raise ValueError(_ERROR_DECRYPTION_REQUIRED)
-
-
-def _validate_encryption_protocol_version(encryption_protocol):
-    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-
-
-def _validate_kek_id(kid, resolved_id):
-    if not (kid == resolved_id):
-        raise ValueError(_ERROR_INVALID_KID)
-
-
-def _validate_encryption_unsupported(require_encryption, key_encryption_key):
-    if require_encryption or (key_encryption_key is not None):
-        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
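Editor's note: the `_validate_key_encryption_key_*` helpers above enforce a duck-typed interface rather than a base class. An illustrative object satisfying it; a real KEK would wrap with RSA-OAEP or AES key wrap, and this identity version exists only to show the required shape:

    class DemoKek(object):
        def wrap_key(self, cek):
            return cek  # no-op wrap; never do this in production

        def unwrap_key(self, wrapped, algorithm):
            return wrapped  # no-op unwrap, matching wrap_key above

        def get_kid(self):
            return 'demo-kek-1'

        def get_key_wrap_algorithm(self):
            return 'demo-identity'

    kek = DemoKek()
    # Passes the hasattr/callable checks performed by the validators above.
    assert callable(kek.wrap_key) and callable(kek.get_kid)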
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/__init__.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/__init__.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,83 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-
-class HTTPError(Exception):
-    '''
-    Represents an HTTP Exception when response status code >= 300.
-
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar list headers:
-        the returned headers, as a list of (name, value) pairs
-    :ivar bytes body:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, respheader, respbody):
-        self.status = status
-        self.respheader = respheader
-        self.respbody = respbody
-        Exception.__init__(self, message)
-
-
-class HTTPResponse(object):
-    '''
-    Represents a response from an HTTP request.
-    
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar dict headers:
-        the returned headers
-    :ivar bytes body:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, headers, body):
-        self.status = status
-        self.message = message
-        self.headers = headers
-        self.body = body
-
-
-class HTTPRequest(object):
-    '''
-    Represents an HTTP Request.
-
-    :ivar str host:
-        the host name to connect to
-    :ivar str method:
-        the method to use to connect (string such as GET, POST, PUT, etc.)
-    :ivar str path:
-        the uri fragment
-    :ivar dict query:
-        query parameters
-    :ivar dict headers:
-        header values
-    :ivar bytes body:
-        the body of the request.
-    '''
-
-    def __init__(self):
-        self.host = ''
-        self.method = ''
-        self.path = ''
-        self.query = {}  # list of (name, value)
-        self.headers = {}  # list of (header name, header value)
-        self.body = ''
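-
-
-# A hedged sketch of filling in an HTTPRequest by hand, the way the service
-# layer does before handing it to the HTTP client; the endpoint, query and
-# header values are placeholders, not required by the class.
-def _example_request():
-    request = HTTPRequest()
-    request.host = 'myaccount.documents.azure.com'
-    request.method = 'GET'
-    request.path = '/Tables'
-    request.query = {'$top': '10'}
-    request.headers = {'Accept': 'application/json'}
-    request.body = b''
-    return request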
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/httpclient.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/httpclient.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/httpclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_http/httpclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,112 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import logging
-from . import HTTPResponse
-from .._serialization import _get_data_bytes_or_stream_only
-logger = logging.getLogger(__name__)
-
-
-class _HTTPClient(object):
-    '''
-    Takes the request and sends it to cloud service and returns the response.
-    '''
-
-    def __init__(self, protocol=None, session=None, timeout=None):
-        '''
-        :param str protocol:
-            http or https.
-        :param requests.Session session:
-            session object created with requests library (or compatible).
-        :param int timeout:
-            timeout for the http request, in seconds.
-        '''
-        self.protocol = protocol
-        self.session = session
-        self.timeout = timeout
-
-        # By default, requests adds Accept:*/* and Accept-Encoding headers to the
-        # session, which causes issues with some Azure REST APIs. Removing them here
-        # gives us the flexibility to add them back on a case-by-case basis.
-        if 'Accept' in self.session.headers:
-            del self.session.headers['Accept']
-
-        if 'Accept-Encoding' in self.session.headers:
-            del self.session.headers['Accept-Encoding']
-
-        self.proxies = None
-
-    def set_proxy(self, host, port, user, password):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        Note that we set the proxies directly on the request later on rather than
-        on the session object, because requests has a bug where the session proxy
-        is ignored in favor of the environment proxy. Consequently, proxy auth only
-        works when the proxies are passed directly to the request, which overrides
-        both.
-
-        :param str host:
-            Address of the proxy. Ex: '192.168.0.100'
-        :param int port:
-            Port of the proxy. Ex: 6000
-        :param str user:
-            User for proxy authorization.
-        :param str password:
-            Password for proxy authorization.
-        '''
-        if user and password:
-            proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
-        else:
-            proxy_string = '{}:{}'.format(host, port)
-
-        self.proxies = {'http': 'http://{}'.format(proxy_string),
-                        'https': 'https://{}'.format(proxy_string)}
-
-    def perform_request(self, request):
-        '''
-        Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. Error
-        status codes are not raised here; translating them is left to the caller.
-        
-        :param HTTPRequest request:
-            The request to serialize and send.
-        :return: An HTTPResponse containing the parsed HTTP response.
-        :rtype: :class:`~azure.storage.common._http.HTTPResponse`
-        '''
-        # Verify the body is either bytes or a file-like/stream object
-        if request.body:
-            request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-
-        # Construct the URI
-        uri = self.protocol.lower() + '://' + request.host + request.path
-
-        # Send the request
-        response = self.session.request(request.method,
-                                        uri,
-                                        params=request.query,
-                                        headers=request.headers,
-                                        data=request.body or None,
-                                        timeout=self.timeout,
-                                        proxies=self.proxies)
-
-        # Parse the response
-        status = int(response.status_code)
-        response_headers = {}
-        for key, value in response.headers.items():
-            response_headers[key.lower()] = value
-
-        wrap = HTTPResponse(status, response.reason, response_headers, response.content)
-        response.close()
-
-        return wrap
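-
-
-# A hedged usage sketch: the client wraps a requests.Session and sends a fully
-# populated HTTPRequest (see _http/__init__.py above); the proxy endpoint is a
-# placeholder.
-def _example_perform(request):
-    import requests
-    client = _HTTPClient(protocol='https', session=requests.Session(), timeout=65)
-    client.set_proxy('192.168.0.100', 6000, user=None, password=None)
-    response = client.perform_request(request)
-    # perform_request lower-cases the response header names.
-    return response.status, response.headers.get('x-ms-request-id')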
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_serialization.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_serialization.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,352 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-import uuid
-from datetime import date
-from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
-from os import fstat
-from time import time
-from wsgiref.handlers import format_date_time
-
-from dateutil.tz import tzutc
-
-if sys.version_info >= (3,):
-    from urllib.parse import quote as url_quote
-else:
-    from urllib2 import quote as url_quote
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES,
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-)
-from ._constants import (
-    X_MS_VERSION,
-    USER_AGENT_STRING,
-)
-from .models import (
-    _unicode_type,
-)
-from ._common_conversion import (
-    _str,
-)
-
-
-def _to_utc_datetime(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _update_request(request):
-    # Verify body
-    if request.body:
-        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-        length = _len_plus(request.body)
-
-        # The only plausible way length is None here is a stream object that is not seekable.
-        if length is None:
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)
-
-        # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
-        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
-            request.headers['Content-Length'] = str(length)
-
-    # Append additional headers based on the service.
-    request.headers['x-ms-version'] = X_MS_VERSION
-    request.headers['User-Agent'] = USER_AGENT_STRING
-    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())
-
-    # If the host has a path component (ex local storage), move it
-    path = request.host.split('/', 1)
-    if len(path) == 2:
-        request.host = path[0]
-        request.path = '/{}{}'.format(path[1], request.path)
-
-    # Encode and optionally add local storage prefix to path
-    request.path = url_quote(request.path, '/()$=\',~')
-
-
-def _add_metadata_headers(metadata, request):
-    if metadata:
-        if not request.headers:
-            request.headers = {}
-        for name, value in metadata.items():
-            request.headers['x-ms-meta-' + name] = value
-
-
-def _add_date_header(request):
-    current_time = format_date_time(time())
-    request.headers['x-ms-date'] = current_time
-
-
-def _get_data_bytes_only(param_name, param_value):
-    '''Validates the request body passed in and converts it to bytes
-    if our policy allows it.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _get_data_bytes_or_stream_only(param_name, param_value):
-    '''Validates the request body passed in is a stream/file-like or bytes
-    object.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _get_request_body(request_body):
-    '''Converts an object into a request body: None becomes empty bytes,
-    bytes and streams pass through unchanged, and anything else is
-    stringified and encoded as UTF-8.'''
-    if request_body is None:
-        return b''
-
-    if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
-        return request_body
-
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    request_body = str(request_body)
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    return request_body
-
-
-def _convert_signed_identifiers_to_xml(signed_identifiers):
-    if signed_identifiers is None:
-        return ''
-
-    sis = ETree.Element('SignedIdentifiers')
-    for id, access_policy in signed_identifiers.items():
-        # SignedIdentifier element for this id
-        si = ETree.SubElement(sis, 'SignedIdentifier')
-
-        # Id element
-        ETree.SubElement(si, 'Id').text = id
-
-        # Access policy element
-        policy = ETree.SubElement(si, 'AccessPolicy')
-
-        if access_policy.start:
-            start = access_policy.start
-            if isinstance(access_policy.start, date):
-                start = _to_utc_datetime(start)
-            ETree.SubElement(policy, 'Start').text = start
-
-        if access_policy.expiry:
-            expiry = access_policy.expiry
-            if isinstance(access_policy.expiry, date):
-                expiry = _to_utc_datetime(expiry)
-            ETree.SubElement(policy, 'Expiry').text = expiry
-
-        if access_policy.permission:
-            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.Element('StorageServiceProperties')
-
-    # Logging
-    if logging:
-        logging_element = ETree.SubElement(service_properties_element, 'Logging')
-        ETree.SubElement(logging_element, 'Version').text = logging.version
-        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
-        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
-        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
-
-        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
-        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)
-
-    # HourMetrics
-    if hour_metrics:
-        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
-        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)
-
-    # MinuteMetrics
-    if minute_metrics:
-        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
-        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)
-
-    # CORS
-    # Make sure to still serialize empty list
-    if cors is not None:
-        cors_element = ETree.SubElement(service_properties_element, 'Cors')
-        for rule in cors:
-            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
-            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
-            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
-            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
-            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
-            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
-
-    # Target version
-    if target_version:
-        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
-                                                            method='xml')
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_metrics_to_xml(metrics, root):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    ETree.SubElement(root, 'Version').text = metrics.version
-
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
-
-    # IncludeAPIs
-    if metrics.enabled and metrics.include_apis is not None:
-        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
-
-    # RetentionPolicy
-    retention_element = ETree.SubElement(root, 'RetentionPolicy')
-    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
-
-
-def _convert_retention_policy_to_xml(retention_policy, root):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
-
-    # Days
-    if retention_policy.enabled and retention_policy.days:
-        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
-
-
-def _len_plus(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            return fstat(fileno).st_size
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
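-
-
-# A quick hedged illustration of _len_plus on the two main input kinds: sized
-# objects take the len() path, while seekable streams are measured from the
-# current position to the end.
-def _example_len_plus():
-    from io import BytesIO
-    stream = BytesIO(b'hello')
-    stream.read(2)                      # advance past the first two bytes
-    assert _len_plus(b'hello') == 5     # len() path
-    assert _len_plus(stream) == 3       # seek/tell path: remaining bytes only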
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/cloudstorageaccount.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,130 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-# Note that we import TableService on demand
-# because this module is imported by azure/storage/__init__
-# i.e. we don't want 'import azure.storage' to trigger an automatic import
-# of table package.
-
-from ._error import _validate_not_none
-from .models import (
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-
-
-class CloudStorageAccount(object):
-    """
-    Provides a factory for creating the table service
-    with a common account name and account key or sas token.  Users can either 
-    use the factory or construct the appropriate service directly.
-    """
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless is_emulated is used.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.is_emulated = is_emulated
-
-    def create_table_service(self):
-        '''
-        Creates a TableService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.table.tableservice.TableService`
-        '''
-        try:
-            from ..table.tableservice import TableService
-            return TableService(self.account_name, self.account_key,
-                                sas_token=self.sas_token,
-                                is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-table is required. '
-                            + 'Please install it using "pip install azure-storage-table"')
-
-    def generate_shared_access_signature(self, services, resource_types,
-                                         permission, expiry, start=None,
-                                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(services, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
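-
-
-# A hedged end-to-end sketch of the factory above; the account name and key are
-# placeholders, and the flag models are the ones imported from .models at the
-# top of this module.
-def _example_account_sas():
-    from datetime import datetime, timedelta
-    account = CloudStorageAccount(account_name='myaccount',
-                                  account_key='<base64-account-key>')
-    return account.generate_shared_access_signature(
-        services=Services(table=True),
-        resource_types=ResourceTypes(object=True),
-        permission=AccountPermissions(read=True),
-        expiry=datetime.utcnow() + timedelta(hours=1))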
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/models.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/models.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,629 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info < (3,):
-    from collections import Iterable
-
-    _unicode_type = unicode
-else:
-    from collections.abc import Iterable
-
-    _unicode_type = str
-
-from ._error import (
-    _validate_not_none
-)
-
-
-class _HeaderDict(dict):
-    def __getitem__(self, index):
-        return super(_HeaderDict, self).__getitem__(index.lower())
-
-
-class _list(list):
-    '''Used so that additional properties can be set on the return list'''
-    pass
-
-
-class _dict(dict):
-    '''Used so that additional properties can be set on the return dictionary'''
-    pass
-
-
-class _OperationContext(object):
-    '''
-    Contains information that lasts the lifetime of an operation. This operation 
-    may span multiple calls to the Azure service.
-
-    :ivar bool location_lock: 
-        Whether the location should be locked for this operation.
-    :ivar str host_location:
-        The location to lock to.
-    '''
-
-    def __init__(self, location_lock=False):
-        self.location_lock = location_lock
-        self.host_location = None
-
-
-class ListGenerator(Iterable):
-    '''
-    A generator object used to list storage resources. The generator will lazily 
-    follow the continuation tokens returned by the service and stop when all 
-    resources have been returned or max_results is reached.
-
-    If max_results is specified and the account has more than that number of 
-    resources, the generator will have a populated next_marker field once it 
-    finishes. This marker can be used to create a new generator if more 
-    results are desired.
-    '''
-
-    def __init__(self, resources, list_method, list_args, list_kwargs):
-        self.items = resources
-        self.next_marker = resources.next_marker
-
-        self._list_method = list_method
-        self._list_args = list_args
-        self._list_kwargs = list_kwargs
-
-    def __iter__(self):
-        # return results
-        for i in self.items:
-            yield i
-
-        while True:
-            # if no more results on the service, return
-            if not self.next_marker:
-                break
-
-            # update the marker args
-            self._list_kwargs['marker'] = self.next_marker
-
-            # handle max results, if present
-            max_results = self._list_kwargs.get('max_results')
-            if max_results is not None:
-                max_results = max_results - len(self.items)
-
-                # if we've reached max_results, return
-                # else, update the max_results arg
-                if max_results <= 0:
-                    break
-                else:
-                    self._list_kwargs['max_results'] = max_results
-
-            # get the next segment
-            resources = self._list_method(*self._list_args, **self._list_kwargs)
-            self.items = resources
-            self.next_marker = resources.next_marker
-
-            # return results
-            for i in self.items:
-                yield i
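-
-
-# A hedged sketch of the pattern: a service list call wraps its first result
-# segment in a ListGenerator so that iteration lazily follows continuation
-# tokens. `service.list_tables` and `first_segment` are illustrative names.
-def _example_listing(service, first_segment):
-    tables = ListGenerator(first_segment, service.list_tables, [],
-                           {'max_results': 100})
-    for table in tables:        # transparently fetches further segments
-        print(table)
-    if tables.next_marker:      # set when max_results stopped the listing early
-        print('more results are available')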
-
-
-class RetryContext(object):
-    '''
-    Contains the request and response information that can be used to determine 
-    whether and how to retry. This context is stored across retries and may be 
-    used to store other information relevant to the retry strategy.
-
-    :ivar ~azure.storage.common._http.HTTPRequest request:
-        The request sent to the storage service.
-    :ivar ~azure.storage.common._http.HTTPResponse response:
-        The response returned by the storage service.
-    :ivar LocationMode location_mode: 
-        The location the request was sent to.
-    '''
-
-    def __init__(self):
-        self.request = None
-        self.response = None
-        self.location_mode = None
-
-
-class LocationMode(object):
-    '''
-    Specifies the location the request should be sent to. This mode only applies 
-    for RA-GRS accounts which allow secondary read access. All other account types 
-    must use PRIMARY.
-    '''
-
-    PRIMARY = 'primary'
-    ''' Requests should be sent to the primary location. '''
-
-    SECONDARY = 'secondary'
-    ''' Requests should be sent to the secondary location, if possible. '''
-
-
-class RetentionPolicy(object):
-    '''
-    By default, Storage Analytics will not delete any logging or metrics data. Blobs 
-    and table entities will continue to be written until the shared 20TB limit is 
-    reached. Once the 20TB limit is reached, Storage Analytics will stop writing 
-    new data and will not resume until free space is available. This 20TB limit 
-    is independent of the total limit for your storage account.
-
-    There are two ways to delete Storage Analytics data: by manually making deletion 
-    requests or by setting a data retention policy. Manual requests to delete Storage 
-    Analytics data are billable, but delete requests resulting from a retention policy 
-    are not billable.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled: 
-            Indicates whether a retention policy is enabled for the 
-            storage service. If disabled, logging and metrics data will be retained 
-            infinitely by the service unless explicitly deleted.
-        :param int days: 
-            Required if enabled is true. Indicates the number of 
-            days that metrics or logging data should be retained. All data older 
-            than this value will be deleted. The minimum value you can specify is 1; 
-            the largest value is 365 (one year).
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class Logging(object):
-    '''
-    Storage Analytics logs detailed information about successful and failed requests 
-    to a storage service. This information can be used to monitor individual requests 
-    and to diagnose issues with a storage service. Requests are logged on a best-effort 
-    basis.
-
-    All logs are stored in block blobs in a container named $logs, which is
-    automatically created when Storage Analytics is enabled for a storage account. 
-    The $logs container is located in the blob namespace of the storage account. 
-    This container cannot be deleted once Storage Analytics has been enabled, though 
-    its contents can be deleted.
-
-    For more information, see  https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
-    '''
-
-    def __init__(self, delete=False, read=False, write=False,
-                 retention_policy=None):
-        '''
-        :param bool delete: 
-            Indicates whether all delete requests should be logged.
-        :param bool read: 
-            Indicates whether all read requests should be logged.
-        :param bool write: 
-            Indicates whether all write requests should be logged.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("read", read)
-        _validate_not_none("write", write)
-        _validate_not_none("delete", delete)
-
-        self.version = u'1.0'
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class Metrics(object):
-    '''
-    Metrics include aggregated transaction statistics and capacity data about requests 
-    to a storage service. Transactions are reported at both the API operation level 
-    as well as at the storage service level, and capacity is reported at the storage 
-    service level. Metrics data can be used to analyze storage service usage, diagnose 
-    issues with requests made against the storage service, and to improve the 
-    performance of applications that use a service.
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
-    '''
-
-    def __init__(self, enabled=False, include_apis=None,
-                 retention_policy=None):
-        '''
-        :param bool enabled: 
-            Indicates whether metrics are enabled for 
-            the service.
-        :param bool include_apis: 
-            Required if enabled is True. Indicates whether metrics 
-            should generate summary statistics for called API operations.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("include_apis", include_apis)
-
-        self.version = u'1.0'
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class CorsRule(object):
-    '''
-    CORS is an HTTP feature that enables a web application running under one domain 
-    to access resources in another domain. Web browsers implement a security 
-    restriction known as same-origin policy that prevents a web page from calling 
-    APIs in a different domain; CORS provides a secure way to allow one domain 
-    (the origin domain) to call APIs in another domain. 
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
-                 exposed_headers=None, allowed_headers=None):
-        '''
-        :param allowed_origins: 
-            A list of origin domains that will be allowed via CORS, or "*" to allow 
-            all domains. The list must contain at least one entry. Limited to 64
-            origin domains. Each allowed origin can have up to 256 characters.
-        :type allowed_origins: list(str)
-        :param allowed_methods:
-            A list of HTTP methods that are allowed to be executed by the origin. 
-            The list must contain at least one entry. For Azure Storage,
-            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-        :type allowed_methods: list(str)
-        :param int max_age_in_seconds:
-            The number of seconds that the client/browser should cache a 
-            preflight response.
-        :param exposed_headers:
-            Defaults to an empty list. A list of response headers to expose to CORS 
-            clients. Limited to 64 defined headers and two prefixed headers. Each 
-            header can be up to 256 characters.
-        :type exposed_headers: list(str)
-        :param allowed_headers:
-            Defaults to an empty list. A list of headers allowed to be part of 
-            the cross-origin request. Limited to 64 defined headers and 2 prefixed 
-            headers. Each header can be up to 256 characters.
-        :type allowed_headers: list(str)
-        '''
-        _validate_not_none("allowed_origins", allowed_origins)
-        _validate_not_none("allowed_methods", allowed_methods)
-        _validate_not_none("max_age_in_seconds", max_age_in_seconds)
-
-        self.allowed_origins = allowed_origins if allowed_origins else list()
-        self.allowed_methods = allowed_methods if allowed_methods else list()
-        self.max_age_in_seconds = max_age_in_seconds
-        self.exposed_headers = exposed_headers if exposed_headers else list()
-        self.allowed_headers = allowed_headers if allowed_headers else list()
-
-
-class ServiceProperties(object):
-    ''' 
-    Returned by get_*_service_properties functions. Contains the properties of a 
-    storage service, including Analytics and CORS rules.
-
-    Azure Storage Analytics performs logging and provides metrics data for a storage 
-    account. You can use this data to trace requests, analyze usage trends, and 
-    diagnose issues with your storage account. To use Storage Analytics, you must 
-    enable it individually for each service you want to monitor.
-
-    The aggregated data is stored in a well-known blob (for logging) and in well-known 
-    tables (for metrics), which may be accessed using the Blob service and Table 
-    service APIs.
-
-    For an in-depth guide on using Storage Analytics and other tools to identify, 
-    diagnose, and troubleshoot Azure Storage-related issues, see 
-    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
-
-    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    pass
-
-
-class ServiceStats(object):
-    ''' 
-    Returned by get_*_service_stats functions. Contains statistics related to 
-    replication for the given service. It is only available when read-access 
-    geo-redundant replication is enabled for the storage account.
-
-    :ivar GeoReplication geo_replication:
-        An object containing statistics related to replication for the given service.
-    '''
-    pass
-
-
-class GeoReplication(object):
-    ''' 
-    Contains statistics related to replication for the given service.
-
-    :ivar str status:
-        The status of the secondary location. Possible values are:
-            live: Indicates that the secondary location is active and operational.
-            bootstrap: Indicates initial synchronization from the primary location 
-            to the secondary location is in progress. This typically occurs 
-            when replication is first enabled.
-            unavailable: Indicates that the secondary location is temporarily 
-            unavailable.
-    :ivar date last_sync_time:
-        A GMT date value, to the second. All primary writes preceding this value 
-        are guaranteed to be available for read operations at the secondary. 
-        Primary writes after this point in time may or may not be available for 
-        reads. The value may be empty if LastSyncTime is not available. This can 
-        happen if the replication status is bootstrap or unavailable. Although 
-        geo-replication is continuously enabled, the LastSyncTime result may 
-        reflect a cached value from the service that is refreshed every few minutes.
-    '''
-    pass
-
-
-class AccessPolicy(object):
-    '''
-    Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and 
-    permissions for the Shared Access Signatures with which it's associated. 
-    Depending on how you want to control access to your table resource, you can 
-    specify all of these parameters within the stored access policy, and omit 
-    them from the URL for the Shared Access Signature. Doing so permits you to 
-    modify the associated signature's behavior at any time, as well as to revoke 
-    it. Or you can specify one or more of the access policy parameters within 
-    the stored access policy, and the others on the URL. Finally, you can 
-    specify all of the parameters on the URL. In this case, you can use the 
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must 
-    include all fields required to authenticate the signature. If any required 
-    fields are missing, the request will fail. Likewise, if a field is specified 
-    both in the Shared Access Signature URL and in the stored access policy, the 
-    request will fail with status code 400 (Bad Request).
-    '''
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        '''
-        :param str permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        '''
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class Protocol(object):
-    '''
-    Specifies the protocol permitted for a SAS token. Note that HTTP only is 
-    not allowed.
-    '''
-
-    HTTPS = 'https'
-    ''' Allow HTTPS requests only. '''
-
-    HTTPS_HTTP = 'https,http'
-    ''' Allow HTTP and HTTPS requests. '''
-
-
-class ResourceTypes(object):
-    '''
-    Specifies the resource types that are accessible with the account SAS.
-
-    :ivar ResourceTypes ResourceTypes.CONTAINER:
-        Access to container-level APIs (e.g., Create/Delete Container, 
-        Create/Delete Queue, Create/Delete Table, Create/Delete Share, 
-        List Blobs/Files and Directories) 
-    :ivar ResourceTypes ResourceTypes.OBJECT:
-        Access to object-level APIs for blobs, queue messages, table entities, and 
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    :ivar ResourceTypes ResourceTypes.SERVICE:
-        Access to service-level APIs (e.g., Get/Set Service Properties, 
-        Get Service Stats, List Containers/Queues/Tables/Shares) 
-    '''
-
-    def __init__(self, service=False, container=False, object=False, _str=None):
-        '''
-        :param bool service:
-            Access to service-level APIs (e.g., Get/Set Service Properties, 
-            Get Service Stats, List Containers/Queues/Tables/Shares) 
-        :param bool container:
-            Access to container-level APIs (e.g., Create/Delete Container, 
-            Create/Delete Queue, Create/Delete Table, Create/Delete Share, 
-            List Blobs/Files and Directories) 
-        :param bool object:
-            Access to object-level APIs for blobs, queue messages, table entities, and 
-            files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-        :param str _str: 
-            A string representing the resource types.
-        '''
-        if not _str:
-            _str = ''
-        self.service = service or ('s' in _str)
-        self.container = container or ('c' in _str)
-        self.object = object or ('o' in _str)
-
-    def __or__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-
-ResourceTypes.SERVICE = ResourceTypes(service=True)
-ResourceTypes.CONTAINER = ResourceTypes(container=True)
-ResourceTypes.OBJECT = ResourceTypes(object=True)
-
-
-class Services(object):
-    '''
-    Specifies the services accessible with the account SAS.
-
-    :ivar Services Services.BLOB: The blob service.
-    :ivar Services Services.FILE: The file service.
-    :ivar Services Services.QUEUE: The queue service.
-    :ivar Services Services.TABLE: The table service.
-    '''
-
-    def __init__(self, blob=False, queue=False, table=False, file=False, _str=None):
-        '''
-        :param bool blob:
-            Access to any blob service, for example, the `.BlockBlobService`
-        :param bool queue:
-            Access to the `.QueueService`
-        :param bool table:
-            Access to the `.TableService`
-        :param bool file:
-            Access to the `.FileService`
-        :param str _str: 
-            A string representing the services.
-        '''
-        if not _str:
-            _str = ''
-        self.blob = blob or ('b' in _str)
-        self.queue = queue or ('q' in _str)
-        self.table = table or ('t' in _str)
-        self.file = file or ('f' in _str)
-
-    def __or__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('t' if self.table else '') +
-                ('f' if self.file else ''))
-
-
-Services.BLOB = Services(blob=True)
-Services.QUEUE = Services(queue=True)
-Services.TABLE = Services(table=True)
-Services.FILE = Services(file=True)
-
-
-class AccountPermissions(object):
-    '''
-    :class:`~AccountPermissions` class to be used with the generate_shared_access_signature
-    method and for the AccessPolicies used with set_*_acl. There are two types of 
-    SAS which may be used to grant resource access. One is to grant access to a 
-    specific resource (resource-specific). Another is to grant access to the 
-    entire service for a specific account and allow certain operations based on 
-    perms found here.
-
-    :ivar AccountPermissions AccountPermissions.ADD:
-        Valid for the following Object resource types only: queue messages, table 
-        entities, and append blobs. 
-    :ivar AccountPermissions AccountPermissions.CREATE:
-        Valid for the following Object resource types only: blobs and files. Users 
-        can create new blobs or files, but may not overwrite existing blobs or files. 
-    :ivar AccountPermissions AccountPermissions.DELETE:
-        Valid for Container and Object resource types, except for queue messages. 
-    :ivar AccountPermissions AccountPermissions.LIST:
-        Valid for Service and Container resource types only. 
-    :ivar AccountPermissions AccountPermissions.PROCESS:
-        Valid for the following Object resource type only: queue messages. 
-    :ivar AccountPermissions AccountPermissions.READ:
-        Valid for all signed resources types (Service, Container, and Object). 
-        Permits read permissions to the specified resource type. 
-    :ivar AccountPermissions AccountPermissions.UPDATE:
-        Valid for the following Object resource types only: queue messages and table 
-        entities. 
-    :ivar AccountPermissions AccountPermissions.WRITE:
-        Valid for all signed resources types (Service, Container, and Object). 
-        Permits write permissions to the specified resource type. 
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 add=False, create=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Valid for all signed resources types (Service, Container, and Object). 
-            Permits read permissions to the specified resource type.
-        :param bool write:
-            Valid for all signed resources types (Service, Container, and Object). 
-            Permits write permissions to the specified resource type.
-        :param bool delete: 
-            Valid for Container and Object resource types, except for queue messages.
-        :param bool list:
-            Valid for Service and Container resource types only.
-        :param bool add:
-            Valid for the following Object resource types only: queue messages, 
-            table entities, and append blobs.
-        :param bool create:
-            Valid for the following Object resource types only: blobs and files. 
-            Users can create new blobs or files, but may not overwrite existing 
-            blobs or files.
-        :param bool update:
-            Valid for the following Object resource types only: queue messages and 
-            table entities.
-        :param bool process:
-            Valid for the following Object resource type only: queue messages.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-AccountPermissions.READ = AccountPermissions(read=True)
-AccountPermissions.WRITE = AccountPermissions(write=True)
-AccountPermissions.DELETE = AccountPermissions(delete=True)
-AccountPermissions.LIST = AccountPermissions(list=True)
-AccountPermissions.ADD = AccountPermissions(add=True)
-AccountPermissions.CREATE = AccountPermissions(create=True)
-AccountPermissions.UPDATE = AccountPermissions(update=True)
-AccountPermissions.PROCESS = AccountPermissions(process=True)
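-
-
-# The flag classes above share one composition idiom: each renders to a short
-# string and can be rebuilt from one. A hedged demo of what that yields:
-def _example_flags():
-    perms = AccountPermissions.READ + AccountPermissions.LIST
-    assert str(perms) == 'rl'
-    assert str(Services(blob=True, queue=True)) == 'bq'
-    assert str(ResourceTypes(_str='co')) == 'co'   # parsed back from a string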
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/retry.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/retry.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/retry.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/retry.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,267 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from abc import ABCMeta
-from math import pow
-
-from .models import LocationMode
-
-
-class _Retry(object):
-    '''
-    The base class for Exponential and Linear retries containing shared code.
-    '''
-    __metaclass__ = ABCMeta
-
-    def __init__(self, max_attempts, retry_to_secondary):
-        '''
-        Constructs a base retry object.
-
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        '''
-        self.max_attempts = max_attempts
-        self.retry_to_secondary = retry_to_secondary
-
-    def _should_retry(self, context):
-        '''
-        A function which determines whether or not to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            A boolean indicating whether or not to retry the request.
-        :rtype: bool
-        '''
-        # If max attempts are reached, do not retry.
-        if context.count >= self.max_attempts:
-            return False
-
-        status = None
-        if context.response and context.response.status:
-            status = context.response.status
-
-        if status is None:
-            '''
-            If status is None, retry as this request triggered an exception. For 
-            example, network issues would trigger this.
-            '''
-            return True
-        elif 200 <= status < 300:
-            '''
-            This method is called after a successful response, meaning we failed 
-            during the response body download or parsing. So, success codes should 
-            be retried.
-            '''
-            return True
-        elif 300 <= status < 500:
-            '''
-            An exception occurred, but in most cases it was expected. Examples could
-            include a 409 Conflict or 412 Precondition Failed.
-            '''
-            if status == 404 and context.location_mode == LocationMode.SECONDARY:
-                # Response code 404 should be retried if secondary was used.
-                return True
-            if status == 408:
-                # Response code 408 is a timeout and should be retried.
-                return True
-            return False
-        elif status >= 500:
-            '''
-            Response codes of 500 and above, with the exception of 501 Not Implemented
-            and 505 HTTP Version Not Supported, indicate a server issue and should be retried.
-            '''
-            if status == 501 or status == 505:
-                return False
-            return True
-        else:
-            # If something else happened, it's unexpected. Retry.
-            return True
-
-    def _set_next_host_location(self, context):
-        '''
-        A function which sets the next host location on the request, if applicable. 
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context containing the previous host location and the request 
-            to evaluate and possibly modify.
-        '''
-        if len(context.request.host_locations) > 1:
-            # If there's more than one possible location, retry to the alternative
-            if context.location_mode == LocationMode.PRIMARY:
-                context.location_mode = LocationMode.SECONDARY
-            else:
-                context.location_mode = LocationMode.PRIMARY
-
-            context.request.host = context.request.host_locations.get(context.location_mode)
-
-    def _retry(self, context, backoff):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :param function() backoff:
-            A function which returns the backoff time if a retry is to be performed.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        # If the context does not contain a count attribute, this request has not
-        # been retried yet. Add the count attribute to track the number of retries.
-        if not hasattr(context, 'count'):
-            context.count = 0
-
-        # Determine whether to retry, and if so increment the count, modify the 
-        # request as desired, and return the backoff.
-        if self._should_retry(context):
-            backoff_interval = backoff(context)
-            context.count += 1
-
-            # If retry to secondary is enabled, attempt to change the host if the 
-            # request allows it
-            if self.retry_to_secondary:
-                self._set_next_host_location(context)
-
-            return backoff_interval
-
-        return None
-
-
-class ExponentialRetry(_Retry):
-    '''
-    Exponential retry.
-    '''
-
-    def __init__(self, initial_backoff=15, increment_power=3, max_attempts=3,
-                 retry_to_secondary=False):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for 
-        the first retry. Subsequent retries occur after initial_backoff + 
-        increment_power^retry_count seconds. For example, by default the first retry 
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the 
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff: 
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_power:
-            The exponential base; increment_power^retry_count seconds is added
-            to the initial_backoff after the first retry.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_power = increment_power
-        super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request.
-        :rtype: int
-        '''
-        return self.initial_backoff + (0 if context.count == 0 else pow(self.increment_power, context.count))
-
-
-class LinearRetry(_Retry):
-    '''
-    Linear retry.
-    '''
-
-    def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False):
-        '''
-        Constructs a Linear retry object.
-
-        :param int backoff: 
-            The backoff interval, in seconds, between retries.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        '''
-        self.backoff = backoff
-        self.max_attempts = max_attempts
-        super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request.
-        :rtype: int
-        '''
-        return self.backoff
-
-
-def no_retry(context):
-    '''
-    Specifies never to retry.
-
-    :param ~azure.storage.models.RetryContext context: 
-        The retry context.
-    :return: 
-        Always returns None to indicate never to retry.
-    :rtype: None
-    '''
-    return None
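
The retry policy removed above reduces to simple arithmetic: ExponentialRetry waits initial_backoff seconds before the first retry and initial_backoff + increment_power^count seconds thereafter, while LinearRetry always waits the same fixed interval. A minimal standalone sketch of that schedule (the function name is illustrative, not part of the removed API):

    from math import pow

    def exponential_schedule(initial_backoff=15, increment_power=3, max_attempts=3):
        # Mirrors the removed ExponentialRetry._backoff: the first retry waits
        # initial_backoff, later retries add increment_power**count seconds.
        for count in range(max_attempts):
            yield initial_backoff + (0 if count == 0 else pow(increment_power, count))

    # Defaults reproduce the docstring above: 15s, 15+3^1=18s, 15+3^2=24s.
    print(list(exponential_schedule()))  # [15, 18.0, 24.0]
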
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/sharedaccesssignature.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,315 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from datetime import date
-
-from ._common_conversion import (
-    _sign_string,
-    _to_str,
-)
-from ._constants import X_MS_VERSION
-from ._serialization import (
-    url_quote,
-    _to_utc_datetime,
-)
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating table shared access
-    signature tokens with a common account name and account key. Users can either
-    use the factory or construct the appropriate service and call the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-
-    def generate_table(self, table_name, permission=None,
-                       expiry=None, start=None, id=None,
-                       ip=None, protocol=None,
-                       start_pk=None, start_rk=None,
-                       end_pk=None, end_rk=None):
-        '''
-        Generates a shared access signature for the table.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param str table_name:
-            Name of table.
-        :param TablePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_table_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with the SAS token. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str start_pk:
-            The minimum partition key accessible with this shared access 
-            signature. startpk must accompany startrk. Key values are inclusive. 
-            If omitted, there is no lower bound on the table entities that can 
-            be accessed.
-        :param str start_rk:
-            The minimum row key accessible with this shared access signature. 
-            startpk must accompany startrk. Key values are inclusive. If 
-            omitted, there is no lower bound on the table entities that can be 
-            accessed.
-        :param str end_pk:
-            The maximum partition key accessible with this shared access 
-            signature. endpk must accompany endrk. Key values are inclusive. If 
-            omitted, there is no upper bound on the table entities that can be 
-            accessed.
-        :param str end_rk:
-            The maximum row key accessible with this shared access signature. 
-            endpk must accompany endrk. Key values are inclusive. If omitted, 
-            there is no upper bound on the table entities that can be accessed.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
-
-        # Table names must be signed lower case
-        resource_path = table_name.lower()
-        sas.add_resource_signature(self.account_name, self.account_key, 'table', resource_path)
-
-        return sas.get_token()
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with the SAS token. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    TABLE_NAME = 'tn'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _to_str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(_QueryStringConstants.SIGNED_START, start)
-        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION)
-
-    def add_resource(self, resource):
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, id):
-        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_table_access_ranges(self, table_name, start_pk, start_rk,
-                                end_pk, end_rk):
-        self._add_query(_QueryStringConstants.TABLE_NAME, table_name)
-        self._add_query(_QueryStringConstants.START_PK, start_pk)
-        self._add_query(_QueryStringConstants.START_RK, start_rk)
-        self._add_query(_QueryStringConstants.END_PK, end_pk)
-        self._add_query(_QueryStringConstants.END_RK, end_rk)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_resource_signature(self, account_name, account_key, service, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        if service == 'blob' or service == 'file':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        if service == 'table':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.START_PK) +
-                 get_value_to_append(_QueryStringConstants.START_RK) +
-                 get_value_to_append(_QueryStringConstants.END_PK) +
-                 get_value_to_append(_QueryStringConstants.END_RK))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
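
add_resource_signature and add_account_signature both reduce to HMAC-SHA256 over a newline-joined field list, keyed with the base64-decoded account key. A self-contained sketch of the account-SAS signing step, assuming _sign_string in the removed _common_conversion module base64-decodes the key, HMACs the UTF-8 string with SHA256, and base64-encodes the digest (sample values are illustrative):

    import base64
    import hashlib
    import hmac

    def sign_string(account_key, string_to_sign):
        # Assumed behaviour of the removed _sign_string helper.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Field order matters and mirrors add_account_signature above; empty
    # strings stand in for omitted query parameters.
    fields = ['myaccount',              # account name
              'r',                      # sp: permission
              't',                      # ss: services
              'o',                      # srt: resource types
              '',                       # st: start (omitted)
              '2017-12-31T00:00:00Z',   # se: expiry
              '',                       # sip: IP range (omitted)
              'https',                  # spr: protocol
              '2017-04-17']             # sv: version
    string_to_sign = '\n'.join(fields) + '\n'
    print(sign_string(base64.b64encode(b'not-a-real-key').decode(), string_to_sign))
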
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/storageclient.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/storageclient.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/storageclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/common/storageclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,355 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import sys
-from abc import ABCMeta
-import logging
-
-logger = logging.getLogger(__name__)
-from time import sleep
-
-import requests
-from azure.common import (
-    AzureException,
-)
-
-from ._constants import (
-    DEFAULT_SOCKET_TIMEOUT
-)
-from ._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _http_error_handler,
-)
-from ._http import HTTPError
-from ._http.httpclient import _HTTPClient
-from ._serialization import (
-    _update_request,
-    _add_date_header,
-)
-from .models import (
-    RetryContext,
-    LocationMode,
-    _OperationContext,
-)
-from .retry import ExponentialRetry
-
-
-class StorageClient(object):
-    '''
-    This is the base class for service objects. Service objects are used to perform
-    all requests to Storage. This class cannot be instantiated directly.
-
-    :ivar str account_name:
-        The storage account name. This is used to authenticate requests 
-        signed with an account key and to construct the storage endpoint. It 
-        is required unless a connection string is given, or if a custom 
-        domain is used with anonymous authentication.
-    :ivar str account_key:
-        The storage account key. This is used for shared key authentication. 
-        If neither account key nor sas token is specified, anonymous access 
-        will be used.
-    :ivar str sas_token:
-        A shared access signature token to use to authenticate requests 
-        instead of the account key. If account key and sas token are both 
-        specified, account key will be used to sign. If neither is 
-        specified, anonymous access will be used.
-    :ivar str primary_endpoint:
-        The endpoint to send storage requests to.
-    :ivar str secondary_endpoint:
-        The secondary endpoint to read storage data from. This will only be a 
-        valid endpoint if the storage account used is RA-GRS and thus allows 
-        reading from secondary.
-    :ivar function(context) retry:
-        A function which determines whether to retry. Takes as a parameter a 
-        :class:`~azure.storage.common.models.RetryContext` object. Returns the number
-        of seconds to wait before retrying the request, or None to indicate not 
-        to retry.
-    :ivar ~azure.storage.common.models.LocationMode location_mode:
-        The host location to use to make requests. Defaults to LocationMode.PRIMARY.
-        Note that this setting only applies to RA-GRS accounts as other account 
-        types do not allow reading from secondary. If the location_mode is set to 
-        LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. 
-        Write requests will continue to be sent to primary.
-    :ivar str protocol:
-        The protocol to use for requests. Defaults to https.
-    :ivar requests.Session request_session:
-        The session object to use for http requests.
-    :ivar function(request) request_callback:
-        A function called immediately before each request is sent. This function 
-        takes as a parameter the request object and returns nothing. It may be 
-        used to add custom headers or log request data.
-    :ivar function() response_callback:
-        A function called immediately after each response is received. This 
-        function takes as a parameter the response object and returns nothing. 
-        It may be used to log response data.
-    :ivar function() retry_callback:
-        A function called immediately after retry evaluation is performed. This 
-        function takes as a parameter the retry context object and returns nothing. 
-        It may be used to detect retries and log context information.
-    '''
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, connection_params):
-        '''
-        :param obj connection_params: The parameters to use to construct the client.
-        '''
-        self.account_name = connection_params.account_name
-        self.account_key = connection_params.account_key
-        self.sas_token = connection_params.sas_token
-        self.is_emulated = connection_params.is_emulated
-
-        self.primary_endpoint = connection_params.primary_endpoint
-        self.secondary_endpoint = connection_params.secondary_endpoint
-
-        protocol = connection_params.protocol
-        request_session = connection_params.request_session or requests.Session()
-        socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
-        self._httpclient = _HTTPClient(
-            protocol=protocol,
-            session=request_session,
-            timeout=socket_timeout,
-        )
-
-        self.retry = ExponentialRetry().retry
-        self.location_mode = LocationMode.PRIMARY
-
-        self.request_callback = None
-        self.response_callback = None
-        self.retry_callback = None
-
-    @property
-    def socket_timeout(self):
-        return self._httpclient.timeout
-
-    @socket_timeout.setter
-    def socket_timeout(self, value):
-        self._httpclient.timeout = value
-
-    @property
-    def protocol(self):
-        return self._httpclient.protocol
-
-    @protocol.setter
-    def protocol(self, value):
-        self._httpclient.protocol = value
-
-    @property
-    def request_session(self):
-        return self._httpclient.session
-
-    @request_session.setter
-    def request_session(self, value):
-        self._httpclient.session = value
-
-    def set_proxy(self, host, port, user=None, password=None):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        :param str host: Address of the proxy. Ex: '192.168.0.100'
-        :param int port: Port of the proxy. Ex: 6000
-        :param str user: User for proxy authorization.
-        :param str password: Password for proxy authorization.
-        '''
-        self._httpclient.set_proxy(host, port, user, password)
-
-    def _get_host_locations(self, primary=True, secondary=False):
-        locations = {}
-        if primary:
-            locations[LocationMode.PRIMARY] = self.primary_endpoint
-        if secondary:
-            locations[LocationMode.SECONDARY] = self.secondary_endpoint
-        return locations
-
-    def _apply_host(self, request, operation_context, retry_context):
-        if operation_context.location_lock and operation_context.host_location:
-            # If this is a location locked operation and the location is set, 
-            # override the request location and host_location.
-            request.host_locations = operation_context.host_location
-            request.host = list(operation_context.host_location.values())[0]
-            retry_context.location_mode = list(operation_context.host_location.keys())[0]
-        elif len(request.host_locations) == 1:
-            # If only one location is allowed, use that location.
-            request.host = list(request.host_locations.values())[0]
-            retry_context.location_mode = list(request.host_locations.keys())[0]
-        else:
-            # If multiple locations are possible, choose based on the location mode.
-            request.host = request.host_locations.get(self.location_mode)
-            retry_context.location_mode = self.location_mode
-
-    @staticmethod
-    def extract_date_and_request_id(retry_context):
-        if getattr(retry_context, 'response', None) is None:
-            return ""
-        resp = retry_context.response
-
-        if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
-                              resp.headers['date'], resp.headers['x-ms-request-id'])
-        elif 'date' in resp.headers:
-            return str.format("Server-Timestamp={0}", resp.headers['date'])
-        elif 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
-        else:
-            return ""
-
-    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
-        '''
-        Sends the request and returns the response. Catches HTTPError and hands it
-        to the error handler.
-        '''
-        operation_context = operation_context or _OperationContext()
-        retry_context = RetryContext()
-
-        # Apply the appropriate host based on the location mode
-        self._apply_host(request, operation_context, retry_context)
-
-        # Apply common settings to the request
-        _update_request(request)
-        client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
-
-        while True:
-            try:
-                try:
-                    # Execute the request callback 
-                    if self.request_callback:
-                        self.request_callback(request)
-
-                    # Add date and auth after the callback so date doesn't get too old and 
-                    # authentication is still correct if signed headers are added in the request 
-                    # callback. This also ensures retry policies with long backoffs
-                    # will work, as it resets the time-sensitive headers.
-                    _add_date_header(request)
-                    self.authentication.sign_request(request)
-
-                    # Set the request context
-                    retry_context.request = request
-
-                    # Log the request before it goes out
-                    logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                request.method,
-                                request.path,
-                                request.query,
-                                str(request.headers).replace('\n', ''))
-
-                    # Perform the request
-                    response = self._httpclient.perform_request(request)
-
-                    # Execute the response callback
-                    if self.response_callback:
-                        self.response_callback(response)
-
-                    # Set the response context
-                    retry_context.response = response
-
-                    # Log the response when it comes back
-                    logger.info("%s Receiving Response: "
-                                "%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                self.extract_date_and_request_id(retry_context),
-                                response.status,
-                                response.message,
-                                str(response.headers).replace('\n', ''))
-
-                    # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
-                    if response.status >= 300:
-                        # This exception will be caught by the general error handler
-                        # and raised as an azure http exception
-                        _http_error_handler(
-                            HTTPError(response.status, response.message, response.headers, response.body))
-
-                    # Parse the response
-                    if parser:
-                        if parser_args:
-                            args = [response]
-                            args.extend(parser_args)
-                            return parser(*args)
-                        else:
-                            return parser(response)
-                    else:
-                        return
-                except AzureException as ex:
-                    raise ex
-                except Exception as ex:
-                    if sys.version_info >= (3,):
-                        # Automatic chaining in Python 3 means we keep the trace
-                        raise AzureException(ex.args[0])
-                    else:
-                        # There isn't a good solution in 2 for keeping the stack trace 
-                        # in general, or that will not result in an error in 3
-                        # However, we can keep the previous error type and message
-                        # TODO: In the future we will log the trace
-                        msg = ""
-                        if len(ex.args) > 0:
-                            msg = ex.args[0]
-                        raise AzureException('{}: {}'.format(ex.__class__.__name__, msg))
-
-            except AzureException as ex:
-                # Only parse the strings used for logging if logging is at least enabled for CRITICAL;
-                # default the values first so the logging calls below never reference undefined names.
-                exception_str_in_one_line = ''
-                status_code = 'Unknown'
-                timestamp_and_request_id = ''
-                if logger.isEnabledFor(logging.CRITICAL):
-                    exception_str_in_one_line = str(ex).replace('\n', '')
-                    status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
-                    timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
-
-                logger.info("%s Operation failed: checking if the operation should be retried. "
-                            "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
-                            client_request_id_prefix,
-                            retry_context.count if hasattr(retry_context, 'count') else 0,
-                            timestamp_and_request_id,
-                            status_code,
-                            exception_str_in_one_line)
-
-                # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
-                # will not be resolved with retries.
-                if str(ex) == _ERROR_DECRYPTION_FAILURE:
-                    logger.error("%s Encountered decryption failure: this cannot be retried. "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-
-                # Determine whether a retry should be performed and if so, how 
-                # long to wait before performing retry.
-                retry_interval = self.retry(retry_context)
-                if retry_interval is not None:
-                    # Execute the callback
-                    if self.retry_callback:
-                        self.retry_callback(retry_context)
-
-                    logger.info(
-                        "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
-                        client_request_id_prefix,
-                        retry_context.count,
-                        retry_interval)
-
-                    # Sleep for the desired retry interval
-                    sleep(retry_interval)
-                else:
-                    logger.error("%s Retry policy did not allow for a retry: "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-            finally:
-                # If this is a location locked operation and the location is not set, 
-                # this is the first request of that operation. Set the location to 
-                # be used for subsequent requests in the operation.
-                if operation_context.location_lock and not operation_context.host_location:
-                    operation_context.host_location = {retry_context.location_mode: request.host}
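
Beyond the retry plumbing, the removed StorageClient exposed three hooks -- request_callback, response_callback and retry_callback -- invoked around each attempt of _perform_request. A sketch of how a caller could wire them up for diagnostics; the FakeService stand-in is hypothetical and models only the three attributes, but any client derived from this class would accept the same assignments:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    class FakeService(object):
        # Stand-in for a client derived from the removed StorageClient.
        request_callback = None
        response_callback = None
        retry_callback = None

    def on_request(request):
        # Runs just before each attempt; a safe place for custom headers.
        request.headers['x-ms-sample-header'] = 'diagnostics'

    def on_response(response):
        log.info('Received HTTP status %s', response.status)

    def on_retry(retry_context):
        # Runs after each retry evaluation, whether or not a retry follows.
        log.info('Retry evaluated; attempts so far: %s',
                 getattr(retry_context, 'count', 0))

    service = FakeService()
    service.request_callback = on_request
    service.response_callback = on_response
    service.retry_callback = on_retry
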
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/__init__.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/__init__.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from .models import (
-    Entity,
-    EntityProperty,
-    Table,
-    TablePermissions,
-    TablePayloadFormat,
-    EdmType,
-    AzureBatchOperationError,
-    AzureBatchValidationError,
-)
-from .tablebatch import TableBatch
-from .tableservice import TableService
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_deserialization.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_deserialization.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,349 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-from dateutil import parser
-
-if sys.version_info < (3,):
-    from urllib2 import quote as url_quote
-else:
-    from urllib.parse import quote as url_quote
-from json import (
-    loads,
-)
-from ..common._http import HTTPResponse
-from azure.common import (
-    AzureException,
-)
-from ..common._common_conversion import (
-    _decode_base64_to_bytes,
-)
-from ..common._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _validate_decryption_required,
-)
-from ._error import (
-    _ERROR_TYPE_NOT_SUPPORTED,
-    _ERROR_INVALID_PROPERTY_RESOLVER,
-)
-from .models import (
-    Entity,
-    EntityProperty,
-    Table,
-    EdmType,
-    AzureBatchOperationError,
-)
-from ..common.models import (
-    _list,
-)
-from ._encryption import (
-    _decrypt_entity,
-    _extract_encryption_metadata,
-)
-
-
-def _get_continuation_from_response_headers(response):
-    marker = {}
-    for name, value in response.headers.items():
-        if name.startswith('x-ms-continuation'):
-            marker[name[len('x-ms-continuation') + 1:]] = value
-    return marker
-
-
-# Tables of conversions to and from entity types.  We support specific
-# datatypes, and beyond that the user can use an EntityProperty to get
-# custom data type support.
-
-def _from_entity_binary(value):
-    return EntityProperty(EdmType.BINARY, _decode_base64_to_bytes(value))
-
-
-def _from_entity_int32(value):
-    return EntityProperty(EdmType.INT32, int(value))
-
-
-def _from_entity_datetime(value):
-    # Note that Azure always returns UTC datetime, and dateutil parser
-    # will set the tzinfo on the date it returns
-    return parser.parse(value)
-
-
-_EDM_TYPES = [EdmType.BINARY, EdmType.INT64, EdmType.GUID, EdmType.DATETIME,
-              EdmType.STRING, EdmType.INT32, EdmType.DOUBLE, EdmType.BOOLEAN]
-
-_ENTITY_TO_PYTHON_CONVERSIONS = {
-    EdmType.BINARY: _from_entity_binary,
-    EdmType.INT32: _from_entity_int32,
-    EdmType.INT64: int,
-    EdmType.DOUBLE: float,
-    EdmType.DATETIME: _from_entity_datetime,
-}
-
-
-def _convert_json_response_to_entity(response, property_resolver, require_encryption,
-                                     key_encryption_key, key_resolver):
-    '''
-    :param bool require_encryption:
-        If set, will enforce that the retrieved entity is encrypted and decrypt it.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the 
-        string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing
-        the interface defined above.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    root = loads(response.body.decode('utf-8'))
-    return _decrypt_and_deserialize_entity(root, property_resolver, require_encryption,
-                                           key_encryption_key, key_resolver)
-
-
-def _convert_json_to_entity(entry_element, property_resolver, encrypted_properties):
-    ''' Convert json response to entity.
-
-    The entity format is:
-    {
-       "Address":"Mountain View",
-       "Age":23,
-       "AmountDue":200.23,
-       "CustomerCode@odata.type":"Edm.Guid",
-       "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
-       "CustomerSince@odata.type":"Edm.DateTime",
-       "CustomerSince":"2008-07-10T00:00:00",
-       "IsActive":true,
-       "NumberOfOrders@odata.type":"Edm.Int64",
-       "NumberOfOrders":"255",
-       "PartitionKey":"mypartitionkey",
-       "RowKey":"myrowkey"
-    }
-    '''
-    entity = Entity()
-
-    properties = {}
-    edmtypes = {}
-    odata = {}
-
-    for name, value in entry_element.items():
-        if name.startswith('odata.'):
-            odata[name[6:]] = value
-        elif name.endswith('@odata.type'):
-            edmtypes[name[:-11]] = value
-        else:
-            properties[name] = value
-
-    # Partition key is a known property
-    partition_key = properties.pop('PartitionKey', None)
-    if partition_key:
-        entity['PartitionKey'] = partition_key
-
-    # Row key is a known property
-    row_key = properties.pop('RowKey', None)
-    if row_key:
-        entity['RowKey'] = row_key
-
-    # Timestamp is a known property
-    timestamp = properties.pop('Timestamp', None)
-    if timestamp:
-        entity['Timestamp'] = _from_entity_datetime(timestamp)
-
-    for name, value in properties.items():
-        mtype = edmtypes.get(name)
-
-        # use the property resolver if present
-        if property_resolver:
-            # Clients are not expected to resolve these internal fields.
-            # This check avoids unexpected behavior from the user-defined 
-            # property resolver.
-            if not (name == '_ClientEncryptionMetadata1' or name == '_ClientEncryptionMetadata2'):
-                mtype = property_resolver(partition_key, row_key,
-                                          name, value, mtype)
-
-                # throw if the type returned is not a valid edm type
-                if mtype and mtype not in _EDM_TYPES:
-                    raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
-
-        # If the property was encrypted, supersede the results of the resolver and set as binary
-        if encrypted_properties is not None and name in encrypted_properties:
-            mtype = EdmType.BINARY
-
-        # Add type for Int32
-        if type(value) is int:
-            mtype = EdmType.INT32
-
-        # no type info, property should parse automatically
-        if not mtype:
-            entity[name] = value
-        else:  # need an object to hold the property
-            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
-            if conv is not None:
-                try:
-                    property = conv(value)
-                except Exception as e:
-                    # throw if the type returned by the property resolver
-                    # cannot be used in the conversion
-                    if property_resolver:
-                        raise AzureException(
-                            _ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
-                    else:
-                        raise e
-            else:
-                property = EntityProperty(mtype, value)
-            entity[name] = property
-
-    # extract etag from entry
-    etag = odata.get('etag')
-    if timestamp:
-        etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
-    entity['etag'] = etag
-
-    return entity
-
-
-def _convert_json_response_to_tables(response):
-    ''' Converts the response to a list of tables.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    tables = _list()
-
-    continuation = _get_continuation_from_response_headers(response)
-    tables.next_marker = continuation.get('nexttablename')
-
-    root = loads(response.body.decode('utf-8'))
-
-    if 'TableName' in root:
-        table = Table()
-        table.name = root['TableName']
-        tables.append(table)
-    else:
-        for element in root['value']:
-            table = Table()
-            table.name = element['TableName']
-            tables.append(table)
-
-    return tables
-
-
-def _convert_json_response_to_entities(response, property_resolver, require_encryption,
-                                       key_encryption_key, key_resolver):
-    ''' Converts the response to a list of entities.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    entities = _list()
-
-    entities.next_marker = _get_continuation_from_response_headers(response)
-
-    root = loads(response.body.decode('utf-8'))
-
-    for entity in root['value']:
-        entity = _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption,
-                                                 key_encryption_key, key_resolver)
-        entities.append(entity)
-
-    return entities
-
-
-def _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption,
-                                    key_encryption_key, key_resolver):
-    try:
-        _validate_decryption_required(require_encryption, key_encryption_key,
-                                      key_resolver)
-        entity_iv, encrypted_properties, content_encryption_key, isJavaV1 = None, None, None, False
-        if (key_encryption_key is not None) or (key_resolver is not None):
-            entity_iv, encrypted_properties, content_encryption_key, isJavaV1 = \
-                _extract_encryption_metadata(entity, require_encryption, key_encryption_key, key_resolver)
-    except Exception:
-        raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-    entity = _convert_json_to_entity(entity, property_resolver, encrypted_properties)
-
-    if entity_iv is not None and encrypted_properties is not None and \
-            content_encryption_key is not None:
-        try:
-            entity = _decrypt_entity(entity, encrypted_properties, content_encryption_key,
-                                     entity_iv, isJavaV1)
-        except Exception:
-            raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-    return entity
-
-
-def _extract_etag(response):
-    ''' Extracts the etag from the response headers. '''
-    if response and response.headers:
-        return response.headers.get('etag')
-
-    return None
-
-
-def _parse_batch_response(response):
-    if response is None or response.body is None:
-        return None
-
-    parts = response.body.split(b'--changesetresponse_')
-
-    responses = []
-    for part in parts:
-        httpLocation = part.find(b'HTTP/')
-        if httpLocation > 0:
-            response_part = _parse_batch_response_part(part[httpLocation:])
-            if response_part.status >= 300:
-                _parse_batch_error(response_part)
-            responses.append(_extract_etag(response_part))
-
-    return responses
-
-
-def _parse_batch_response_part(part):
-    lines = part.splitlines()
-
-    # First line is the HTTP status/reason
-    status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
-
-    # Followed by headers and body
-    headers = {}
-    body = b''
-    isBody = False
-    for line in lines[1:]:
-        if line == b'' and not isBody:
-            isBody = True
-        elif isBody:
-            body += line
-        else:
-            headerName, _, headerVal = line.partition(b': ')
-            headers[headerName.lower().decode("utf-8")] = headerVal.decode("utf-8")
-
-    return HTTPResponse(int(status), reason.strip(), headers, body)
-
-
-def _parse_batch_error(part):
-    doc = loads(part.body.decode('utf-8'))
-
-    code = ''
-    message = ''
-    error = doc.get('odata.error')
-    if error:
-        code = error.get('code')
-        if error.get('message'):
-            message = error.get('message').get('value')
-
-    raise AzureBatchOperationError(message, part.status, code)
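
The deserializer above hinges on one dispatch: keys starting with 'odata.' are metadata, keys ending in '@odata.type' carry Edm type annotations, and everything else is a property value. A compact sketch of that split on the documented example payload (the helper name is illustrative; typed conversion is reduced to the Int64 and DateTime cases):

    from dateutil import parser  # same dependency as the removed module

    def split_entity_json(entry):
        # Mirrors the first loop of _convert_json_to_entity.
        properties, edmtypes, odata = {}, {}, {}
        for name, value in entry.items():
            if name.startswith('odata.'):
                odata[name[6:]] = value
            elif name.endswith('@odata.type'):
                edmtypes[name[:-11]] = value
            else:
                properties[name] = value
        return properties, edmtypes, odata

    entry = {
        "Age": 23,
        "NumberOfOrders@odata.type": "Edm.Int64",
        "NumberOfOrders": "255",
        "CustomerSince@odata.type": "Edm.DateTime",
        "CustomerSince": "2008-07-10T00:00:00",
        "PartitionKey": "mypartitionkey",
        "RowKey": "myrowkey",
    }
    properties, edmtypes, odata = split_entity_json(entry)
    # Int64 arrives as a string and DateTime as ISO text, so conversion
    # consults the annotation map, as _ENTITY_TO_PYTHON_CONVERSIONS does.
    assert edmtypes["NumberOfOrders"] == "Edm.Int64"
    print(int(properties["NumberOfOrders"]))          # 255
    print(parser.parse(properties["CustomerSince"]))  # 2008-07-10 00:00:00
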
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_encryption.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_encryption.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,300 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import os
-from copy import deepcopy
-from json import (
-    dumps,
-    loads,
-)
-
-from azure.common import AzureException
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.hashes import (
-    Hash,
-    SHA256,
-)
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._common_conversion import (
-    _decode_base64_to_bytes,
-)
-from ..common._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-)
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _dict_to_encryption_data,
-    _generate_AES_CBC_cipher,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm
-)
-from ..common._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-)
-from ._error import (
-    _ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION,
-    _ERROR_ENTITY_NOT_ENCRYPTED
-)
-from .models import (
-    Entity,
-    EntityProperty,
-    EdmType,
-)
-
-
-def _encrypt_entity(entity, key_encryption_key, encryption_resolver):
-    '''
-    Encrypts the given entity using AES256 in CBC mode with 128 bit padding.
-    Will generate a content-encryption-key (cek) to encrypt the properties either
-    stored in an EntityProperty with the 'encrypt' flag set or those
-    specified by the encryption resolver. This cek is then wrapped using the 
-    provided key_encryption_key (kek). Only strings may be encrypted and the
-    result is stored as binary on the service. 
-
-    :param entity:
-        The entity to insert. Could be a dict or an entity object.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function(partition_key, row_key, property_name) encryption_resolver:
-        A function that takes in an entity's partition key, row key, and property name and returns 
-        a boolean that indicates whether that property should be encrypted.
-    :return: An entity with both the appropriate properties encrypted and the 
-        encryption data.
-    :rtype: object
-    '''
-
-    _validate_not_none('entity', entity)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always uses 16 byte blocks.
-    content_encryption_key = os.urandom(32)
-    entity_initialization_vector = os.urandom(16)
-
-    encrypted_properties = []
-    encrypted_entity = Entity()
-    for key, value in entity.items():
-        # If the property resolver says it should be encrypted
-        # or it is an EntityProperty with the 'encrypt' property set.
-        if (isinstance(value, EntityProperty) and value.encrypt) or \
-                (encryption_resolver is not None
-                 and encryption_resolver(entity['PartitionKey'], entity['RowKey'], key)):
-
-            # Only strings can be encrypted and None is not an instance of str.
-            if isinstance(value, EntityProperty):
-                if value.type == EdmType.STRING:
-                    value = value.value
-                else:
-                    raise ValueError(_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION)
-            if not isinstance(value, str):
-                raise ValueError(_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION)
-
-            # Value is now confirmed to hold a valid string value to be encrypted
-            # and should be added to the list of encrypted properties.
-            encrypted_properties.append(key)
-
-            propertyIV = _generate_property_iv(entity_initialization_vector,
-                                               entity['PartitionKey'], entity['RowKey'],
-                                               key, False)
-
-            # Encode the strings for encryption.
-            value = value.encode('utf-8')
-
-            cipher = _generate_AES_CBC_cipher(content_encryption_key, propertyIV)
-
-            # PKCS7 with 16 byte blocks ensures compatibility with AES.
-            padder = PKCS7(128).padder()
-            padded_data = padder.update(value) + padder.finalize()
-
-            # Encrypt the data.
-            encryptor = cipher.encryptor()
-            encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-            # Set the new value of this key to be a binary EntityProperty for proper serialization.
-            value = EntityProperty(EdmType.BINARY, encrypted_data)
-
-        encrypted_entity[key] = value
-
-    encrypted_properties = dumps(encrypted_properties)
-
-    # Generate the metadata iv.
-    metadataIV = _generate_property_iv(entity_initialization_vector,
-                                       entity['PartitionKey'], entity['RowKey'],
-                                       '_ClientEncryptionMetadata2', False)
-
-    encrypted_properties = encrypted_properties.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, metadataIV)
-
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(encrypted_properties) + padder.finalize()
-
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    encrypted_entity['_ClientEncryptionMetadata2'] = EntityProperty(EdmType.BINARY, encrypted_data)
-
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     entity_initialization_vector)
-
-    encrypted_entity['_ClientEncryptionMetadata1'] = dumps(encryption_data)
-    return encrypted_entity
-
-
-def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key, entityIV, isJavaV1):
-    '''
-    Decrypts the specified entity using AES256 in CBC mode with 128-bit PKCS7 padding.
-    Unwraps the CEK using either the specified KEK or the key returned by the key_resolver.
-    Properties specified in the encrypted_properties_list will be decrypted and decoded
-    to utf-8 strings.
-
-    :param entity:
-        The entity being retrieved and decrypted. Could be a dict or an entity object.
-    :param list encrypted_properties_list:
-        The list of all the properties that are encrypted.
-    :param bytes[] content_encryption_key:
-        The key used internally to encrypt the entity. Extracted from the entity metadata.
-    :param bytes[] entityIV:
-        The initialization vector used to seed the encryption algorithm. Extracted from the
-        entity metadata.
-    :param bool isJavaV1:
-        Whether the entity was encrypted with the Java v1 encryption protocol.
-    :return: The decrypted entity
-    :rtype: Entity
-    '''
-
-    _validate_not_none('entity', entity)
-
-    decrypted_entity = deepcopy(entity)
-    try:
-        for prop in entity.keys():
-            if prop in encrypted_properties_list:
-                value = entity[prop]
-
-                propertyIV = _generate_property_iv(entityIV,
-                                                   entity['PartitionKey'], entity['RowKey'],
-                                                   prop, isJavaV1)
-                cipher = _generate_AES_CBC_cipher(content_encryption_key,
-                                                  propertyIV)
-
-                # Decrypt the property.
-                decryptor = cipher.decryptor()
-                decrypted_data = (decryptor.update(value.value) + decryptor.finalize())
-
-                # Unpad the data.
-                unpadder = PKCS7(128).unpadder()
-                decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-                decrypted_data = decrypted_data.decode('utf-8')
-
-                decrypted_entity[prop] = decrypted_data
-
-        decrypted_entity.pop('_ClientEncryptionMetadata1')
-        decrypted_entity.pop('_ClientEncryptionMetadata2')
-        return decrypted_entity
-    except Exception:
-        raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-
-def _extract_encryption_metadata(entity, require_encryption, key_encryption_key, key_resolver):
-    '''
-    Extracts the encryption metadata from the given entity, decoding it to utf-8 strings
-    where needed. If no encryption metadata is present, returns None for all return values
-    unless require_encryption is true, in which case the method raises a ValueError.
-
-    :param entity:
-        The entity being retrieved and decrypted. Could be a dict or an entity object.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved entity is encrypted and decrypt it.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the 
-        string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing
-        the interface defined above.
-    :returns: a tuple containing the entity iv, the list of encrypted properties, the entity cek,
-        and whether the entity was encrypted using JavaV1.
-    :rtype: tuple (bytes[], list, bytes[], bool)
-    '''
-    _validate_not_none('entity', entity)
-
-    try:
-        encrypted_properties_list = _decode_base64_to_bytes(entity['_ClientEncryptionMetadata2'])
-        encryption_data = entity['_ClientEncryptionMetadata1']
-        encryption_data = _dict_to_encryption_data(loads(encryption_data))
-    except Exception:
-        # Entity did not have properly formatted encryption metadata.
-        if require_encryption:
-            raise ValueError(_ERROR_ENTITY_NOT_ENCRYPTED)
-        else:
-            return None, None, None, None
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-
-    # Special check for compatibility with Java V1 encryption protocol.
-    isJavaV1 = (encryption_data.key_wrapping_metadata is None) or \
-               ((encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1) and
-                'EncryptionLibrary' in encryption_data.key_wrapping_metadata and
-                'Java' in encryption_data.key_wrapping_metadata['EncryptionLibrary'])
-
-    metadataIV = _generate_property_iv(encryption_data.content_encryption_IV,
-                                       entity['PartitionKey'], entity['RowKey'],
-                                       '_ClientEncryptionMetadata2', isJavaV1)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, metadataIV)
-
-    # Decrypt the data.
-    decryptor = cipher.decryptor()
-    encrypted_properties_list = decryptor.update(encrypted_properties_list) + decryptor.finalize()
-
-    # Unpad the data.
-    unpadder = PKCS7(128).unpadder()
-    encrypted_properties_list = unpadder.update(encrypted_properties_list) + unpadder.finalize()
-
-    encrypted_properties_list = encrypted_properties_list.decode('utf-8')
-
-    if isJavaV1:
-        # Strip the square brackets from the ends and split the string into a list.
-        encrypted_properties_list = encrypted_properties_list[1:-1]
-        encrypted_properties_list = encrypted_properties_list.split(', ')
-    else:
-        encrypted_properties_list = loads(encrypted_properties_list)
-
-    return encryption_data.content_encryption_IV, encrypted_properties_list, content_encryption_key, isJavaV1
-
-
-def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1):
-    '''
-    Uses the entity_iv, partition key, row key, and property name to generate
-    and return the iv for the specified property.
-    '''
-    digest = Hash(SHA256(), default_backend())
-    if not isJavaV1:
-        digest.update(entity_iv +
-                      (rk + pk + property_name).encode('utf-8'))
-    else:
-        digest.update(entity_iv +
-                      (pk + rk + property_name).encode('utf-8'))
-    propertyIV = digest.finalize()
-    return propertyIV[:16]
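
The key_encryption_key object the docstrings above keep referring to is entirely user-supplied; the SDK relies only on the wrap_key/get_key_wrap_algorithm/get_kid (and, for decryption, unwrap_key) interface. A minimal sketch of such an object, assuming a locally held 256-bit key and RFC 3394 AES key wrap from the `cryptography` package already imported above; the KeyWrapper name and the 'A256KW' algorithm string are illustrative, not part of this SDK:

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.keywrap import aes_key_unwrap, aes_key_wrap


    class KeyWrapper(object):
        '''Illustrative key-encryption-key backed by a locally held 256-bit key.'''

        def __init__(self, kid):
            self.kid = kid             # string id stored in _ClientEncryptionMetadata1
            self.kek = os.urandom(32)  # key used to wrap/unwrap the generated cek

        def wrap_key(self, key):
            # RFC 3394 AES key wrap of the content-encryption-key.
            return aes_key_wrap(self.kek, key, default_backend())

        def unwrap_key(self, key, algorithm):
            # Reverse of wrap_key; used by the decryption path.
            if algorithm != 'A256KW':
                raise ValueError('Unknown key wrap algorithm.')
            return aes_key_unwrap(self.kek, key, default_backend())

        def get_key_wrap_algorithm(self):
            return 'A256KW'

        def get_kid(self):
            return self.kid
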
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_error.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_error.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,76 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_VALUE_NONE_OR_EMPTY,
-)
-
-_ERROR_ATTRIBUTE_MISSING = '\'{0}\' object has no attribute \'{1}\''
-_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
-_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
-_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
-_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
-    'Cannot serialize the specified value ({0}) to an entity.  Please use ' + \
-    'an EntityProperty (which can specify custom types), int, str, bool, ' + \
-    'or datetime.'
-_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
-    'Row Keys should not be the same in a batch operation'
-_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
-    'Partition Key should be the same in a batch operation'
-_ERROR_INVALID_ENTITY_TYPE = 'The entity must be either in dict format or an entity object.'
-_ERROR_INVALID_PROPERTY_RESOLVER = \
-    'The specified property resolver returned an invalid type. Name: {0}, Value: {1}, ' + \
-    'EdmType: {2}'
-_ERROR_PROPERTY_NAME_TOO_LONG = 'The property name exceeds the maximum allowed length.'
-_ERROR_TOO_MANY_ENTITIES_IN_BATCH = \
-    'Batches may only contain 100 operations'
-_ERROR_TOO_MANY_PROPERTIES = 'The entity contains more properties than allowed.'
-_ERROR_TYPE_NOT_SUPPORTED = 'Type not supported when sending data to the service: {0}.'
-_ERROR_VALUE_TOO_LARGE = '{0} is too large to be cast to type {1}.'
-_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION = 'Encryption is only supported for non-None strings.'
-_ERROR_ENTITY_NOT_ENCRYPTED = 'Entity was not encrypted.'
-
-
-def _validate_object_has_param(param_name, obj):
-    if obj.get(param_name) is None:
-        raise ValueError(_ERROR_VALUE_NONE_OR_EMPTY.format(param_name))
-
-
-def _validate_entity(entity, encrypt=None):
-    # Validate entity exists
-    _validate_not_none('entity', entity)
-
-    # Entity inherits from dict, so just validating dict is fine
-    if not isinstance(entity, dict):
-        raise TypeError(_ERROR_INVALID_ENTITY_TYPE)
-
-    # Validate partition key and row key are present
-    _validate_object_has_param('PartitionKey', entity)
-    _validate_object_has_param('RowKey', entity)
-
-    # Two properties are added during encryption. Validate sufficient space
-    max_properties = 255
-    if encrypt:
-        max_properties = max_properties - 2
-
-    # Validate there are not more than 255 properties including Timestamp
-    if (len(entity) > max_properties) or (len(entity) == max_properties and 'Timestamp' not in entity):
-        raise ValueError(_ERROR_TOO_MANY_PROPERTIES)
-
-    # Validate the property names are not too long
-    for propname in entity:
-        if len(propname) > 255:
-            raise ValueError(_ERROR_PROPERTY_NAME_TOO_LONG)
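
As the validators above encode, an entity must be dict-like, carry PartitionKey and RowKey, have at most 255 properties (253 when client-side encryption will add its two metadata properties), and keep every property name at 255 characters or fewer. A short sketch of how _validate_entity behaves, assuming the 1.4.0 tree is still installed (this module is removed in 1.5.0):

    from azure.multiapi.cosmosdb.v2017_04_17.table._error import _validate_entity

    entity = {'PartitionKey': 'pk', 'RowKey': 'rk', 'Age': 23}
    _validate_entity(entity)            # passes: keys present, limits respected

    too_wide = {'PartitionKey': 'pk', 'RowKey': 'rk'}
    too_wide.update({'prop{0}'.format(i): i for i in range(254)})
    try:
        _validate_entity(too_wide)      # 256 properties -> ValueError
    except ValueError as err:
        print(err)                      # The entity contains more properties than allowed.

    # With encrypt=True the cap drops to 253, reserving room for the two
    # _ClientEncryptionMetadata properties added during encryption.
    _validate_entity(entity, encrypt=True)
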
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_request.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_request.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_request.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_request.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,202 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import (
-    _to_str,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-)
-from ._encryption import (
-    _encrypt_entity,
-)
-from ._error import (
-    _validate_entity,
-)
-from ._serialization import (
-    _convert_entity_to_json,
-    _DEFAULT_ACCEPT_HEADER,
-    _DEFAULT_CONTENT_TYPE_HEADER,
-    _DEFAULT_PREFER_HEADER,
-)
-
-
-def _get_entity(partition_key, row_key, select, accept):
-    '''
-    Constructs a get entity request.
-    '''
-    _validate_not_none('partition_key', partition_key)
-    _validate_not_none('row_key', row_key)
-    _validate_not_none('accept', accept)
-    request = HTTPRequest()
-    request.method = 'GET'
-    request.headers = {'Accept': _to_str(accept)}
-    request.query = {'$select': _to_str(select)}
-
-    return request
-
-
-def _insert_entity(entity, encryption_required=False,
-                   key_encryption_key=None, encryption_resolver=None):
-    '''
-    Constructs an insert entity request.
-    :param entity:
-        The entity to insert. Could be a dict or an entity object.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function(partition_key, row_key, property_name) encryption_resolver:
-        A function that takes in an entity's partition key, row key, and property name and returns
-        a boolean that indicates whether that property should be encrypted.
-    '''
-    _validate_entity(entity, key_encryption_key is not None)
-    _validate_encryption_required(encryption_required, key_encryption_key)
-
-    request = HTTPRequest()
-    request.method = 'POST'
-    request.headers = {
-        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-        _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1]
-    }
-    if key_encryption_key:
-        entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-
-def _update_entity(entity, if_match, encryption_required=False,
-                   key_encryption_key=None, encryption_resolver=None):
-    '''
-    Constructs an update entity request.
-    :param entity:
-        The entity to update. Could be a dict or an entity object.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function(partition_key, row_key, property_name) encryption_resolver:
-        A function that takes in an entity's partition key, row key, and property name and returns
-        a boolean that indicates whether that property should be encrypted.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_entity(entity, key_encryption_key is not None)
-    _validate_encryption_required(encryption_required, key_encryption_key)
-
-    request = HTTPRequest()
-    request.method = 'PUT'
-    request.headers = {
-        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-        'If-Match': _to_str(if_match),
-    }
-    if key_encryption_key:
-        entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-
-def _merge_entity(entity, if_match, require_encryption=False, key_encryption_key=None):
-    '''
-    Constructs a merge entity request.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_entity(entity)
-    _validate_encryption_unsupported(require_encryption, key_encryption_key)
-
-    request = HTTPRequest()
-    request.method = 'MERGE'
-    request.headers = {
-        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-        'If-Match': _to_str(if_match)
-    }
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-
-def _delete_entity(partition_key, row_key, if_match):
-    '''
-    Constructs a delete entity request.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_not_none('partition_key', partition_key)
-    _validate_not_none('row_key', row_key)
-    request = HTTPRequest()
-    request.method = 'DELETE'
-    request.headers = {
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-        'If-Match': _to_str(if_match)
-    }
-
-    return request
-
-
-def _insert_or_replace_entity(entity, require_encryption=False,
-                              key_encryption_key=None, encryption_resolver=None):
-    '''
-    Constructs an insert or replace entity request.
-    '''
-    _validate_entity(entity, key_encryption_key is not None)
-    _validate_encryption_required(require_encryption, key_encryption_key)
-
-    request = HTTPRequest()
-    request.method = 'PUT'
-    request.headers = {
-        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-    }
-
-    if key_encryption_key:
-        entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver)
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-
-def _insert_or_merge_entity(entity, require_encryption=False, key_encryption_key=None):
-    '''
-    Constructs an insert or merge entity request.
-    :param entity:
-        The entity to insert. Could be a dict or an entity object.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    '''
-    _validate_entity(entity)
-    _validate_encryption_unsupported(require_encryption, key_encryption_key)
-
-    request = HTTPRequest()
-    request.method = 'MERGE'
-    request.headers = {
-        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
-    }
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
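
Each builder above pairs a table operation with its HTTP verb and, for conditional operations, an If-Match header for optimistic concurrency. A small sketch of the resulting HTTPRequest objects, again assuming the pre-removal 1.4.0 layout (the conditional ETag value is made up):

    from azure.multiapi.cosmosdb.v2017_04_17.table._request import (
        _delete_entity,
        _update_entity,
    )

    entity = {'PartitionKey': 'pk', 'RowKey': 'rk', 'Age': 24}

    # Unconditional update: the wildcard ETag matches any stored version.
    request = _update_entity(entity, if_match='*')
    print(request.method)                  # PUT
    print(request.headers['If-Match'])     # *

    # Conditional delete: performed only if the stored ETag still matches.
    request = _delete_entity('pk', 'rk', if_match='W/"datetime\'2017-04-17\'"')
    print(request.method)                  # DELETE
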
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_serialization.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_serialization.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,266 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-import types
-import uuid
-from datetime import datetime
-from json import (
-    dumps,
-)
-from math import (
-    isnan,
-)
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _to_str,
-)
-from ..common._serialization import (
-    _to_utc_datetime,
-)
-from ._error import (
-    _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
-    _ERROR_TYPE_NOT_SUPPORTED,
-    _ERROR_VALUE_TOO_LARGE,
-)
-from .models import (
-    EntityProperty,
-    TablePayloadFormat,
-    EdmType,
-)
-
-if sys.version_info < (3,):
-    def _new_boundary():
-        return str(uuid.uuid1())
-else:
-    def _new_boundary():
-        return str(uuid.uuid1()).encode('utf-8')
-
-_DEFAULT_ACCEPT_HEADER = ('Accept', TablePayloadFormat.JSON_MINIMAL_METADATA)
-_DEFAULT_CONTENT_TYPE_HEADER = ('Content-Type', 'application/json')
-_DEFAULT_PREFER_HEADER = ('Prefer', 'return-no-content')
-_SUB_HEADERS = ['If-Match', 'Prefer', 'Accept', 'Content-Type', 'DataServiceVersion']
-
-
-def _get_entity_path(table_name, partition_key, row_key):
-    return '/{0}(PartitionKey=\'{1}\',RowKey=\'{2}\')'.format(
-        _to_str(table_name),
-        _to_str(partition_key),
-        _to_str(row_key))
-
-
-def _update_storage_table_header(request):
-    ''' Add additional headers for a storage table request. '''
-
-    # set service version
-    request.headers['DataServiceVersion'] = '3.0;NetFx'
-    request.headers['MaxDataServiceVersion'] = '3.0'
-
-
-def _to_entity_binary(value):
-    return EdmType.BINARY, _encode_base64(value)
-
-
-def _to_entity_bool(value):
-    return None, value
-
-
-def _to_entity_datetime(value):
-    return EdmType.DATETIME, _to_utc_datetime(value)
-
-
-def _to_entity_float(value):
-    if isnan(value):
-        return EdmType.DOUBLE, 'NaN'
-    if value == float('inf'):
-        return EdmType.DOUBLE, 'Infinity'
-    if value == float('-inf'):
-        return EdmType.DOUBLE, '-Infinity'
-    return None, value
-
-
-def _to_entity_guid(value):
-    return EdmType.GUID, str(value)
-
-
-def _to_entity_int32(value):
-    if sys.version_info < (3,):
-        value = long(value)
-    else:
-        value = int(value)
-    if value >= 2 ** 31 or value < -(2 ** 31):
-        raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT32))
-    return None, value
-
-
-def _to_entity_int64(value):
-    if sys.version_info < (3,):
-        ivalue = long(value)
-    else:
-        ivalue = int(value)
-    if ivalue >= 2 ** 63 or ivalue < -(2 ** 63):
-        raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT64))
-    return EdmType.INT64, str(value)
-
-
-def _to_entity_str(value):
-    return None, value
-
-
-def _to_entity_none(value):
-    return None, None
-
-
-# Conversion from Python type to a function which returns a tuple of the
-# type string and content string.
-_PYTHON_TO_ENTITY_CONVERSIONS = {
-    int: _to_entity_int64,
-    bool: _to_entity_bool,
-    datetime: _to_entity_datetime,
-    float: _to_entity_float,
-    str: _to_entity_str,
-}
-
-# Conversion from Edm type to a function which returns a tuple of the
-# type string and content string.
-_EDM_TO_ENTITY_CONVERSIONS = {
-    EdmType.BINARY: _to_entity_binary,
-    EdmType.BOOLEAN: _to_entity_bool,
-    EdmType.DATETIME: _to_entity_datetime,
-    EdmType.DOUBLE: _to_entity_float,
-    EdmType.GUID: _to_entity_guid,
-    EdmType.INT32: _to_entity_int32,
-    EdmType.INT64: _to_entity_int64,
-    EdmType.STRING: _to_entity_str,
-}
-
-if sys.version_info < (3,):
-    _PYTHON_TO_ENTITY_CONVERSIONS.update({
-        long: _to_entity_int64,
-        types.NoneType: _to_entity_none,
-        unicode: _to_entity_str,
-    })
-
-
-def _convert_entity_to_json(source):
-    ''' Converts an entity object to json to send.
-    The entity format is:
-    {
-       "Address":"Mountain View",
-       "Age":23,
-       "AmountDue":200.23,
-       "CustomerCode@odata.type":"Edm.Guid",
-       "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
-       "CustomerSince@odata.type":"Edm.DateTime",
-       "CustomerSince":"2008-07-10T00:00:00",
-       "IsActive":true,
-       "NumberOfOrders@odata.type":"Edm.Int64",
-       "NumberOfOrders":"255",
-       "PartitionKey":"mypartitionkey",
-       "RowKey":"myrowkey"
-    }
-    '''
-
-    properties = {}
-
-    # Set the property type for types we can infer when the value carries no
-    # type info; if the value has type info, then use value.type instead.
-    for name, value in source.items():
-        mtype = ''
-
-        if isinstance(value, EntityProperty):
-            conv = _EDM_TO_ENTITY_CONVERSIONS.get(value.type)
-            if conv is None:
-                raise TypeError(
-                    _ERROR_TYPE_NOT_SUPPORTED.format(value.type))
-            mtype, value = conv(value.value)
-        else:
-            conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
-            if conv is None and sys.version_info >= (3,) and value is None:
-                conv = _to_entity_none
-            if conv is None:
-                raise TypeError(
-                    _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
-                        type(value).__name__))
-            mtype, value = conv(value)
-
-        # form the property node
-        properties[name] = value
-        if mtype:
-            properties[name + '@odata.type'] = mtype
-
-    # generate the entity_body
-    return dumps(properties)
-
-
-def _convert_table_to_json(table_name):
-    '''
-    Create json to send for a given table name. The json format for a table is
-    the same as for an entity; the only difference is that a table has a single
-    property, 'TableName', so we just call _convert_entity_to_json.
-
-    table_name:
-        the name of the table
-    '''
-    return _convert_entity_to_json({'TableName': table_name})
-
-
-def _convert_batch_to_json(batch_requests):
-    '''
-    Create json to send for an array of batch requests.
-
-    batch_requests:
-        an array of requests
-    '''
-    batch_boundary = b'batch_' + _new_boundary()
-    changeset_boundary = b'changeset_' + _new_boundary()
-
-    body = [b'--' + batch_boundary + b'\n',
-            b'Content-Type: multipart/mixed; boundary=',
-            changeset_boundary + b'\n\n']
-
-    content_id = 1
-
-    # Adds each request body to the POST data.
-    for _, request in batch_requests:
-        body.append(b'--' + changeset_boundary + b'\n')
-        body.append(b'Content-Type: application/http\n')
-        body.append(b'Content-Transfer-Encoding: binary\n\n')
-        body.append(request.method.encode('utf-8'))
-        body.append(b' ')
-        body.append(request.path.encode('utf-8'))
-        body.append(b' HTTP/1.1\n')
-        body.append(b'Content-ID: ')
-        body.append(str(content_id).encode('utf-8') + b'\n')
-        content_id += 1
-
-        for name, value in request.headers.items():
-            if name in _SUB_HEADERS:
-                body.append(name.encode('utf-8') + b': ')
-                body.append(value.encode('utf-8') + b'\n')
-
-        # Add different headers for different request types.
-        if request.method != 'DELETE':
-            body.append(b'Content-Length: ')
-            body.append(str(len(request.body)).encode('utf-8'))
-            body.append(b'\n\n')
-            body.append(request.body + b'\n')
-
-        body.append(b'\n')
-
-    body.append(b'--' + changeset_boundary + b'--' + b'\n')
-    body.append(b'--' + batch_boundary + b'--')
-
-    return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8')
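
The conversion tables above drive serialization: Python ints widen to Edm.Int64 (the value is serialized as a string plus an @odata.type annotation), floats and bools need no annotation, and GUID, INT32, and BINARY values must be explicitly typed via EntityProperty. A hedged illustration against the 1.4.0 layout:

    import uuid

    from azure.multiapi.cosmosdb.v2017_04_17.table._serialization import _convert_entity_to_json
    from azure.multiapi.cosmosdb.v2017_04_17.table.models import EdmType, EntityProperty

    entity = {
        'PartitionKey': 'pk',
        'RowKey': 'rk',
        'Age': 23,                                           # int -> "23" plus Age@odata.type: Edm.Int64
        'Score': 200.23,                                     # float -> plain JSON number, no annotation
        'Code': EntityProperty(EdmType.GUID, uuid.uuid4()),  # GUID requires explicit typing
    }
    print(_convert_entity_to_json(entity))
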
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/models.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/models.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,210 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from azure.common import (
-    AzureException,
-    AzureHttpError,
-)
-
-from ._error import (
-    _ERROR_ATTRIBUTE_MISSING,
-)
-
-
-class AzureBatchValidationError(AzureException):
-    '''
-    Indicates that a batch operation cannot proceed due to invalid input.
-
-    :ivar str message: 
-        A detailed error message indicating the reason for the failure. 
-    '''
-
-
-class AzureBatchOperationError(AzureHttpError):
-    '''
-    Indicates that a batch operation failed.
-    
-    :ivar str message: 
-        A detailed error message indicating the index of the batch 
-        request which failed and the reason for the failure. For example, 
-        '0:One of the request inputs is out of range.' indicates the 0th batch 
-        request failed as one of its property values was out of range.
-    :ivar int status_code: 
-        The HTTP status code of the batch request. For example, 400.
-    :ivar str code:
-        The batch status code. For example, 'OutOfRangeInput'.
-    '''
-
-    def __init__(self, message, status_code, batch_code):
-        super(AzureBatchOperationError, self).__init__(message, status_code)
-        self.code = batch_code
-
-
-class Entity(dict):
-    '''
-    An entity object. Can be accessed as a dict or as an object. The attributes of
-    the entity will be created dynamically. For example, the following are both
-    valid::
-
-        entity = Entity()
-        entity.a = 'b'
-        entity['x'] = 'y'
-    '''
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
-
-    __setattr__ = dict.__setitem__
-
-    def __delattr__(self, name):
-        try:
-            del self[name]
-        except KeyError:
-            raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
-
-    def __dir__(self):
-        return dir({}) + list(self.keys())
-
-
-class EntityProperty(object):
-    '''
-    An entity property. Used to explicitly set :class:`~EdmType` when necessary. 
-    
-    Values which require explicit typing are GUID, INT32, and BINARY. Other EdmTypes
-    may be explicitly created as EntityProperty objects but need not be. For example,
-    both of the following create STRING typed properties on the entity::
-
-        entity = Entity()
-        entity.a = 'b'
-        entity.x = EntityProperty(EdmType.STRING, 'y')
-    '''
-
-    def __init__(self, type=None, value=None, encrypt=False):
-        '''
-        Creates an EntityProperty.
-
-        :param str type: The EdmType of the property.
-        :param value: The value of the property.
-        :param bool encrypt: Indicates whether or not the property should be encrypted.
-        '''
-        self.type = type
-        self.value = value
-        self.encrypt = encrypt
-
-
-class Table(object):
-    '''
-    Represents an Azure Table. Returned by list_tables.
-
-    :ivar str name: The name of the table.
-    '''
-    pass
-
-
-class TablePayloadFormat(object):
-    '''
-    Specifies the accepted content type of the response payload. More information
-    can be found here: https://msdn.microsoft.com/en-us/library/azure/dn535600.aspx
-    '''
-
-    JSON_NO_METADATA = 'application/json;odata=nometadata'
-    '''Returns no type information for the entity properties.'''
-
-    JSON_MINIMAL_METADATA = 'application/json;odata=minimalmetadata'
-    '''Returns minimal type information for the entity properties.'''
-
-    JSON_FULL_METADATA = 'application/json;odata=fullmetadata'
-    '''Returns full type information for the entity properties plus some extra odata properties.'''
-
-
-class EdmType(object):
-    '''
-    Used by :class:`~.EntityProperty` to represent the type of the entity property 
-    to be stored by the Table service.
-    '''
-
-    BINARY = 'Edm.Binary'
-    ''' Represents byte data. Must be specified. '''
-
-    INT64 = 'Edm.Int64'
-    ''' Represents a number between -(2^63) and 2^63 - 1. This is the default type for Python numbers. '''
-
-    GUID = 'Edm.Guid'
-    ''' Represents a GUID. Must be specified. '''
-
-    DATETIME = 'Edm.DateTime'
-    ''' Represents a date. This type will be inferred for Python datetime objects. '''
-
-    STRING = 'Edm.String'
-    ''' Represents a string. This type will be inferred for Python strings. '''
-
-    INT32 = 'Edm.Int32'
-    ''' Represents a number between -(2^31) and 2^31 - 1. Must be specified or numbers will default to INT64. '''
-
-    DOUBLE = 'Edm.Double'
-    ''' Represents a double. This type will be inferred for Python floating point numbers. '''
-
-    BOOLEAN = 'Edm.Boolean'
-    ''' Represents a boolean. This type will be inferred for Python bools. '''
-
-
-class TablePermissions(object):
-    '''
-    TablePermissions class to be used with the :func:`~azure.storage.table.tableservice.TableService.generate_table_shared_access_signature`
-    method and for the AccessPolicies used with :func:`~azure.storage.table.tableservice.TableService.set_table_acl`.
-
-    :ivar TablePermissions TablePermissions.QUERY: Get entities and query entities.
-    :ivar TablePermissions TablePermissions.ADD: Add entities.
-    :ivar TablePermissions TablePermissions.UPDATE: Update entities.
-    :ivar TablePermissions TablePermissions.DELETE: Delete entities.
-    '''
-
-    def __init__(self, query=False, add=False, update=False, delete=False, _str=None):
-        '''
-        :param bool query:
-            Get entities and query entities.
-        :param bool add:
-            Add entities. Add and Update permissions are required for upsert operations.
-        :param bool update:
-            Update entities. Add and Update permissions are required for upsert operations.
-        :param bool delete: 
-            Delete entities.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.query = query or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return TablePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return TablePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.query else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('d' if self.delete else ''))
-
-
-TablePermissions.QUERY = TablePermissions(query=True)
-TablePermissions.ADD = TablePermissions(add=True)
-TablePermissions.UPDATE = TablePermissions(update=True)
-TablePermissions.DELETE = TablePermissions(delete=True)
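
Entity's dict/attribute duality and the way TablePermissions values compose are easiest to see in use; a short sketch, 1.4.0 layout assumed:

    from azure.multiapi.cosmosdb.v2017_04_17.table.models import Entity, TablePermissions

    entity = Entity()
    entity.PartitionKey = 'pk'     # attribute style and ...
    entity['RowKey'] = 'rk'        # ... dict style address the same storage
    print(entity['PartitionKey'])  # pk
    print(entity.RowKey)           # rk

    # Permissions compose with | or +; str() yields the SAS permission string.
    perms = TablePermissions.QUERY | TablePermissions.ADD | TablePermissions.UPDATE
    print(str(perms))              # rau
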
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tablebatch.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tablebatch.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tablebatch.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tablebatch.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,209 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ._error import (
-    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
-    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
-    _ERROR_TOO_MANY_ENTITIES_IN_BATCH,
-)
-from ._request import (
-    _insert_entity,
-    _update_entity,
-    _merge_entity,
-    _delete_entity,
-    _insert_or_replace_entity,
-    _insert_or_merge_entity,
-)
-from .models import (
-    AzureBatchValidationError,
-)
-
-
-class TableBatch(object):
-    '''
-    This is the class used for batch operations with the storage table service.
-
-    The Table service supports batch transactions on entities that are in the 
-    same table and belong to the same partition group. Multiple operations are 
-    supported within a single transaction. The batch can include at most 100 
-    entities, and its total payload may be no more than 4 MB in size.
-    '''
-
-    def __init__(self, require_encryption=False, key_encryption_key=None,
-                 encryption_resolver=None):
-        self._requests = []
-        self._partition_key = None
-        self._row_keys = []
-        self._require_encryption = require_encryption
-        self._key_encryption_key = key_encryption_key
-        self._encryption_resolver = encryption_resolver
-
-    def insert_entity(self, entity):
-        '''
-        Adds an insert entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_entity` for more 
-        information on inserts.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        '''
-        request = _insert_entity(entity, self._require_encryption, self._key_encryption_key,
-                                 self._encryption_resolver)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def update_entity(self, entity, if_match='*'):
-        '''
-        Adds an update entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.update_entity` for more 
-        information on updates.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to update. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The update operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional update, set If-Match to the wildcard character (*).
-        '''
-        request = _update_entity(entity, if_match, self._require_encryption,
-                                 self._key_encryption_key, self._encryption_resolver)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def merge_entity(self, entity, if_match='*'):
-        '''
-        Adds a merge entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.merge_entity` for more 
-        information on merges.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The merge operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional merge, set If-Match to the wildcard character (*).
-        '''
-
-        request = _merge_entity(entity, if_match, self._require_encryption,
-                                self._key_encryption_key)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def delete_entity(self, partition_key, row_key,
-                      if_match='*'):
-        '''
-        Adds a delete entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.delete_entity` for more 
-        information on deletes.
-
-        The operation will not be executed until the batch is committed.
-
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The delete operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional delete, set If-Match to the wildcard character (*).
-        '''
-        request = _delete_entity(partition_key, row_key, if_match)
-        self._add_to_batch(partition_key, row_key, request)
-
-    def insert_or_replace_entity(self, entity):
-        '''
-        Adds an insert or replace entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more 
-        information on insert or replace operations.
-
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert or replace. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        '''
-        request = _insert_or_replace_entity(entity, self._require_encryption, self._key_encryption_key,
-                                            self._encryption_resolver)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def insert_or_merge_entity(self, entity):
-        '''
-        Adds an insert or merge entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_or_merge_entity` for more 
-        information on insert or merge operations.
-
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert or merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        '''
-
-        request = _insert_or_merge_entity(entity, self._require_encryption,
-                                          self._key_encryption_key)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def _add_to_batch(self, partition_key, row_key, request):
-        '''
-        Validates batch-specific rules.
-        
-        :param str partition_key:
-            PartitionKey of the entity.
-        :param str row_key:
-            RowKey of the entity.
-        :param request:
-            the request to insert, update or delete entity
-        '''
-        # All same partition keys
-        if self._partition_key:
-            if self._partition_key != partition_key:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
-        else:
-            self._partition_key = partition_key
-
-        # All different row keys
-        if row_key in self._row_keys:
-            raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
-        else:
-            self._row_keys.append(row_key)
-
-        # 100 entities
-        if len(self._requests) >= 100:
-            raise AzureBatchValidationError(_ERROR_TOO_MANY_ENTITIES_IN_BATCH)
-
-        # Add the request to the batch
-        self._requests.append((row_key, request))
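
The _add_to_batch checks above enforce the transaction rules: one partition key per batch, unique row keys, and at most 100 operations. A sketch of a violation, assuming the 1.4.0 layout:

    from azure.multiapi.cosmosdb.v2017_04_17.table.models import AzureBatchValidationError
    from azure.multiapi.cosmosdb.v2017_04_17.table.tablebatch import TableBatch

    batch = TableBatch()
    batch.insert_entity({'PartitionKey': 'pk', 'RowKey': '1', 'Age': 23})
    batch.update_entity({'PartitionKey': 'pk', 'RowKey': '2', 'Age': 24})

    try:
        # A second partition key violates the single-partition rule.
        batch.insert_entity({'PartitionKey': 'other', 'RowKey': '3'})
    except AzureBatchValidationError as err:
        print(err)                 # Partition Key should be the same in a batch operation
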
diff -pruN 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tableservice.py 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tableservice.py
--- 1.4.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tableservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/cosmosdb/v2017_04_17/table/tableservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1098 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from contextlib import contextmanager
-
-from azure.common import (
-    AzureHttpError,
-)
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageTableSharedKeyAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _convert_xml_to_service_stats,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_STORAGE_MISSING_INFO,
-    _validate_access_policies,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _update_request,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_json_response_to_entity,
-    _convert_json_response_to_tables,
-    _convert_json_response_to_entities,
-    _parse_batch_response,
-    _extract_etag,
-)
-from ._request import (
-    _get_entity,
-    _insert_entity,
-    _update_entity,
-    _merge_entity,
-    _delete_entity,
-    _insert_or_replace_entity,
-    _insert_or_merge_entity,
-)
-from ._serialization import (
-    _convert_table_to_json,
-    _convert_batch_to_json,
-    _update_storage_table_header,
-    _get_entity_path,
-    _DEFAULT_ACCEPT_HEADER,
-    _DEFAULT_CONTENT_TYPE_HEADER,
-    _DEFAULT_PREFER_HEADER,
-)
-from .models import TablePayloadFormat
-from .tablebatch import TableBatch
-
-
-class TableService(StorageClient):
-    '''
-    This is the main class managing Azure Table resources.
-
-    The Azure Table service offers structured storage in the form of tables. Tables 
-    store data as collections of entities. Entities are similar to rows. An entity 
-    has a primary key and a set of properties. A property is a name, typed-value pair, 
-    similar to a column. The Table service does not enforce any schema for tables, 
-    so two entities in the same table may have different sets of properties. Developers 
-    may choose to enforce a schema on the client side. A table may contain any number 
-    of entities.
-
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar function(partition_key, row_key, property_name) encryption_resolver_function:
-        A function that takes in an entity's partition key, row key, and property name and returns 
-        a boolean that indicates whether that property should be encrypted.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all entities successfully uploaded to the table and all those downloaded and
-        successfully read from the table are/were encrypted while on the server. If this flag is set, all required
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'table',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(TableService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageTableSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self.encryption_resolver_function = None
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the table service.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. Required.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required. Azure will always convert values to UTC. If a date is passed 
-            in without timezone info, it is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.TABLE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
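-    # A minimal usage sketch for the account-level SAS above (illustrative only;
-    # the account name and key are placeholders, and ResourceTypes /
-    # AccountPermissions are assumed to come from this package's models):
-    #
-    #     from datetime import datetime, timedelta
-    #     service = TableService(account_name='myaccount', account_key='<base64-key>')
-    #     token = service.generate_account_shared_access_signature(
-    #         ResourceTypes.OBJECT, AccountPermissions.READ,
-    #         expiry=datetime.utcnow() + timedelta(hours=1))
-    #     reader = TableService(account_name='myaccount', sas_token=token)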
-    def generate_table_shared_access_signature(self, table_name, permission=None,
-                                               expiry=None, start=None, id=None,
-                                               ip=None, protocol=None,
-                                               start_pk=None, start_rk=None,
-                                               end_pk=None, end_rk=None):
-        '''
-        Generates a shared access signature for the table.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param str table_name:
-            The name of the table to create a SAS token for.
-        :param TablePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_table_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str start_pk:
-            The minimum partition key accessible with this shared access 
-            signature. startpk must accompany startrk. Key values are inclusive. 
-            If omitted, there is no lower bound on the table entities that can 
-            be accessed.
-        :param str start_rk:
-            The minimum row key accessible with this shared access signature. 
-            startpk must accompany startrk. Key values are inclusive. If 
-            omitted, there is no lower bound on the table entities that can be 
-            accessed.
-        :param str end_pk:
-            The maximum partition key accessible with this shared access 
-            signature. endpk must accompany endrk. Key values are inclusive. If 
-            omitted, there is no upper bound on the table entities that can be 
-            accessed.
-        :param str end_rk:
-            The maximum row key accessible with this shared access signature. 
-            endpk must accompany endrk. Key values are inclusive. If omitted, 
-            there is no upper bound on the table entities that can be accessed.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_table(
-            table_name,
-            permission=permission,
-            expiry=expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            start_pk=start_pk,
-            start_rk=start_rk,
-            end_pk=end_pk,
-            end_rk=end_rk,
-        )
-
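-    # Sketch: a token scoped to a single table and partition-key range (names and
-    # keys are placeholders; TablePermissions is assumed importable from this package):
-    #
-    #     token = service.generate_table_shared_access_signature(
-    #         'mytable', permission=TablePermissions.QUERY,
-    #         expiry=datetime.utcnow() + timedelta(hours=1),
-    #         start_pk='0000100', end_pk='0000199')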
-    def get_table_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Table service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durable 
-        in two locations. In both locations, Azure Storage constantly maintains 
-        multiple healthy replicas of your data. The location where you read, 
-        create, update, or delete data is the primary storage account location. 
-        The primary location exists in the region you choose at the time you 
-        create an account via the Azure classic portal, for 
-        example, North Central US. The location to which your data is replicated 
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The table service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = '/'
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
-    def get_table_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Table service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The table service properties.
-        :rtype: :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = '/'
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def set_table_service_properties(self, logging=None, hour_metrics=None,
-                                     minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Table service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for tables.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for tables.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = '/'
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
-
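-    # Sketch: enable hourly metrics and one permissive CORS rule while leaving the
-    # logging settings untouched (passing None preserves them, per the docstring):
-    #
-    #     metrics = Metrics(enabled=True, include_apis=True,
-    #                       retention_policy=RetentionPolicy(enabled=True, days=7))
-    #     service.set_table_service_properties(
-    #         hour_metrics=metrics,
-    #         cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])])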
-    def list_tables(self, num_results=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the tables. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all tables 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        tables, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param int num_results:
-            The maximum number of tables to return.
-        :param marker:
-            An opaque continuation object. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :type marker: obj
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        :return: A generator which produces :class:`~azure.storage.common.models.table.Table` objects.
-        :rtype: :class:`~azure.storage.common.models.ListGenerator`:
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout,
-                  '_context': operation_context}
-        resp = self._list_tables(**kwargs)
-
-        return ListGenerator(resp, self._list_tables, (), kwargs)
-
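-    # Sketch of lazy enumeration plus explicit continuation, as described above:
-    #
-    #     first_page = service.list_tables(num_results=5)
-    #     names = [table.name for table in first_page]   # generator drives the calls
-    #     if first_page.next_marker:                     # more tables remain
-    #         remainder = service.list_tables(marker=first_page.next_marker)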
-    def _list_tables(self, max_results=None, marker=None, timeout=None, _context=None):
-        '''
-        Returns a list of tables under the specified account. Makes a single list 
-        request to the service. Used internally by the list_tables method.
-
-        :param int max_results:
-            The maximum number of tables to return. A single list request may 
-            return up to 1000 tables and potentially a continuation token which 
-            should be followed to get additional results.
-        :param marker:
-            A dictionary which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            tables. The marker value is opaque to the client.
-        :type marker: obj
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of tables, potentially with a next_marker property.
-        :rtype: list(:class:`~azure.storage.common.models.table.Table`)
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = '/Tables'
-        request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA}
-        request.query = {
-            '$top': _int_to_str(max_results),
-            'NextTableName': _to_str(marker),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_json_response_to_tables,
-                                     operation_context=_context)
-
-    def create_table(self, table_name, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new table in the storage account.
-
-        :param str table_name:
-            The name of the table to create. The table name may contain only
-            alphanumeric characters and cannot begin with a numeric character.
-            It is case-insensitive and must be from 3 to 63 characters long.
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the table already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the table was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations()
-        request.path = '/Tables'
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
-            _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1],
-            _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]
-        }
-        request.body = _get_request_body(_convert_table_to_json(table_name))
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
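-    # Sketch: idempotent versus strict creation (table name is a placeholder):
-    #
-    #     created = service.create_table('mytable')            # False if it already exists
-    #     service.create_table('mytable', fail_on_exist=True)  # raises on conflict instead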
-    def exists(self, table_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the table exists.
-
-        :param str table_name:
-            The name of table to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the table exists.
-        :rtype: bool
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = "/Tables('" + _to_str(table_name) + "')"
-        request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA}
-        request.query = {'timeout': _int_to_str(timeout)}
-
-        try:
-            self._perform_request(request)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def delete_table(self, table_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified table and any data it contains.
-
-        When a table is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The table is later removed from 
-        the Table service during garbage collection.
-
-        Note that deleting a table is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the table while it is being deleted, 
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str table_name:
-            The name of the table to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the table doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the table was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = '/Tables(\'' + _to_str(table_name) + '\')'
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]}
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_table_acl(self, table_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        table that may be used with Shared Access Signatures.
-
-        :param str table_name:
-            The name of an existing table.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the table.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = '/' + _to_str(table_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the table that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a table, the existing permissions are replaced. 
-        To update the table's permissions, call :func:`~get_table_acl` to fetch 
-        all access policies associated with the table, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a table, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str table_name:
-            The name of an existing table.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the table. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('table_name', table_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = '/' + _to_str(table_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
-
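-    # Sketch of the fetch-modify-set pattern described above (the policy id is a
-    # placeholder; AccessPolicy and TablePermissions are assumed importable):
-    #
-    #     policies = service.get_table_acl('mytable')
-    #     policies['read-only'] = AccessPolicy(
-    #         permission=TablePermissions.QUERY,
-    #         expiry=datetime.utcnow() + timedelta(days=30))
-    #     service.set_table_acl('mytable', signed_identifiers=policies)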
-    def query_entities(self, table_name, filter=None, select=None, num_results=None,
-                       marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                       property_resolver=None, timeout=None):
-        '''
-        Returns a generator to list the entities in the table specified. The 
-        generator will lazily follow the continuation tokens returned by the 
-        service and stop when all entities have been returned or num_results is
-        reached.
-
-        If num_results is specified and the account has more than that number of
-        entities, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str table_name:
-            The name of the table to query.
-        :param str filter:
-            Returns only entities that satisfy the specified filter. Note that 
-            no more than 15 discrete comparisons are permitted within a $filter 
-            string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx 
-            for more information on constructing filters.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param int num_results:
-            The maximum number of entities to return.
-        :param marker:
-            An opaque continuation object. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :type marker: obj
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
-        :rtype: :class:`~azure.storage.common.models.ListGenerator`
-        '''
-
-        operation_context = _OperationContext(location_lock=True)
-        if self.key_encryption_key is not None or self.key_resolver_function is not None:
-            # If query already requests all properties, no need to add the metadata columns
-            if select is not None and select != '*':
-                select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'
-
-        args = (table_name,)
-        kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker,
-                  'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout,
-                  '_context': operation_context}
-        resp = self._query_entities(*args, **kwargs)
-
-        return ListGenerator(resp, self._query_entities, args, kwargs)
-
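-    # Sketch: a filtered, projected query; the generator follows continuation
-    # tokens transparently (table and property names are illustrative):
-    #
-    #     entities = service.query_entities(
-    #         'tasks',
-    #         filter="PartitionKey eq 'backlog' and Priority gt 2",
-    #         select='Description,Priority',
-    #         num_results=100)
-    #     for entity in entities:
-    #         print(entity.Description)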
-    def _query_entities(self, table_name, filter=None, select=None, max_results=None,
-                        marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                        property_resolver=None, timeout=None, _context=None):
-        '''
-        Returns a list of entities under the specified table. Makes a single list 
-        request to the service. Used internally by the query_entities method.
-
-        :param str table_name:
-            The name of the table to query.
-        :param str filter:
-            Returns only entities that satisfy the specified filter. Note that 
-            no more than 15 discrete comparisons are permitted within a $filter 
-            string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx 
-            for more information on constructing filters.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param int max_results:
-            The maximum number of entities to return.
-        :param obj marker:
-            A dictionary which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            entities. The marker value is opaque to the client.
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of entities, potentially with a next_marker property.
-        :rtype: list(:class:`~azure.storage.table.models.Entity`)
-        '''
-        _validate_not_none('table_name', table_name)
-        _validate_not_none('accept', accept)
-        next_partition_key = None if marker is None else marker.get('nextpartitionkey')
-        next_row_key = None if marker is None else marker.get('nextrowkey')
-
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = '/' + _to_str(table_name) + '()'
-        request.headers = {'Accept': _to_str(accept)}
-        request.query = {
-            '$filter': _to_str(filter),
-            '$select': _to_str(select),
-            '$top': _int_to_str(max_results),
-            'NextPartitionKey': _to_str(next_partition_key),
-            'NextRowKey': _to_str(next_row_key),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_json_response_to_entities,
-                                     [property_resolver, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function],
-                                     operation_context=_context)
-
-    def commit_batch(self, table_name, batch, timeout=None):
-        '''
-        Commits a :class:`~azure.storage.table.TableBatch` request.
-
-        :param str table_name:
-            The name of the table to commit the batch to.
-        :param TableBatch batch:
-            The batch to commit.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A list of the batch responses corresponding to the requests in the batch.
-            Each item is either an etag (on success) or an error object (on failure).
-        :rtype: list(:class:`~azure.storage.table.models.AzureBatchOperationError`, str)
-        '''
-        _validate_not_none('table_name', table_name)
-
-        # Construct the batch request
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations()
-        request.path = '/' + '$batch'
-        request.query = {'timeout': _int_to_str(timeout)}
-
-        # Update the batch operation requests with table and client specific info
-        for row_key, batch_request in batch._requests:
-            if batch_request.method == 'POST':
-                batch_request.path = '/' + _to_str(table_name)
-            else:
-                batch_request.path = _get_entity_path(table_name, batch._partition_key, row_key)
-            if self.is_emulated:
-                batch_request.path = '/' + DEV_ACCOUNT_NAME + batch_request.path
-            _update_request(batch_request)
-
-        # Construct the batch body
-        request.body, boundary = _convert_batch_to_json(batch._requests)
-        request.headers = {'Content-Type': boundary}
-
-        # Perform the batch request and return the response
-        return self._perform_request(request, _parse_batch_response)
-
-    @contextmanager
-    def batch(self, table_name, timeout=None):
-        '''
-        Creates a batch object which can be used as a context manager. Commits the batch on exit.
-
-        :param str table_name:
-            The name of the table to commit the batch to.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        batch = TableBatch(self.require_encryption, self.key_encryption_key, self.encryption_resolver_function)
-        yield batch
-        self.commit_batch(table_name, batch, timeout=timeout)
-
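-    # Sketch: every operation queued inside the `with` block targets one partition
-    # key and is committed as a single atomic batch on exit (values are placeholders):
-    #
-    #     with service.batch('tasks') as batch:
-    #         for i in range(3):
-    #             batch.insert_entity({'PartitionKey': 'backlog',
-    #                                  'RowKey': '{:07d}'.format(i),
-    #                                  'Priority': i})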
-    def get_entity(self, table_name, partition_key, row_key, select=None,
-                   accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                   property_resolver=None, timeout=None):
-        '''
-        Get an entity from the specified table. Throws if the entity does not exist.
-
-        :param str table_name:
-            The name of the table to get the entity from.
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The retrieved entity.
-        :rtype: :class:`~azure.storage.table.models.Entity`
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _get_entity(partition_key, row_key, select, accept)
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_entity_path(table_name, partition_key, row_key)
-        request.query['timeout'] = _int_to_str(timeout)
-
-        return self._perform_request(request, _convert_json_response_to_entity,
-                                     [property_resolver, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def insert_entity(self, table_name, entity, timeout=None):
-        '''
-        Inserts a new entity into the table. Throws if an entity with the same 
-        PartitionKey and RowKey already exists.
-
-        When inserting an entity into a table, you must specify values for the 
-        PartitionKey and RowKey system properties. Together, these properties 
-        form the primary key and must be unique within the table. Both the 
-        PartitionKey and RowKey values must be string values; each key value may 
-        be up to 1 KB in size. If you are using an integer value for the key 
-        value, you should convert the integer to a fixed-width string, because 
-        string keys are sorted lexically. For example, you should convert the 
-        value 1 to 0000001 to ensure proper sorting.
-
-        :param str table_name:
-            The name of the table to insert the entity into.
-        :param entity:
-            The entity to insert. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the inserted entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-
-        request = _insert_entity(entity, self.require_encryption, self.key_encryption_key,
-                                 self.encryption_resolver_function)
-        request.host_locations = self._get_host_locations()
-        request.path = '/' + _to_str(table_name)
-        request.query['timeout'] = _int_to_str(timeout)
-
-        return self._perform_request(request, _extract_etag)
-
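-    # Sketch of the fixed-width key advice above: zero-pad numeric keys so the
-    # lexical ordering used by the service matches numeric order:
-    #
-    #     entity = {'PartitionKey': 'orders',
-    #               'RowKey': '{:07d}'.format(42),   # '0000042' sorts before '0000100'
-    #               'Total': 19.99}
-    #     etag = service.insert_entity('orders', entity)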
-    def update_entity(self, table_name, entity, if_match='*', timeout=None):
-        '''
-        Updates an existing entity in a table. Throws if the entity does not exist. 
-        The update_entity operation replaces the entire entity and can be used to 
-        remove properties.
-
-        :param str table_name:
-            The name of the table containing the entity to update.
-        :param entity:
-            The entity to update. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The update operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional update, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _update_entity(entity, if_match, self.require_encryption, self.key_encryption_key,
-                                 self.encryption_resolver_function)
-        request.host_locations = self._get_host_locations()
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-        request.query['timeout'] = _int_to_str(timeout)
-
-        return self._perform_request(request, _extract_etag)
-
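-    # Sketch of optimistic concurrency via if_match (values are placeholders):
-    # passing the etag from a prior read makes the update fail if another writer
-    # has modified the entity in the meantime, instead of silently overwriting it:
-    #
-    #     entity = service.get_entity('orders', 'orders', '0000042')
-    #     entity.Total = 24.99
-    #     service.update_entity('orders', entity, if_match=entity.etag)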
-    def merge_entity(self, table_name, entity, if_match='*', timeout=None):
-        '''
-        Updates an existing entity by merging the entity's properties. Throws 
-        if the entity does not exist. 
-        
-        This operation does not replace the existing entity as the update_entity
-        operation does. A property cannot be removed with merge_entity.
-        
-        Any properties with null values are ignored. All other properties will be 
-        updated or added.
-
-        :param str table_name:
-            The name of the table containing the entity to merge.
-        :param entity:
-            The entity to merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The merge operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional merge, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-
-        _validate_not_none('table_name', table_name)
-
-        request = _merge_entity(entity, if_match, self.require_encryption,
-                                self.key_encryption_key)
-        request.host_locations = self._get_host_locations()
-        request.query['timeout'] = _int_to_str(timeout)
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        return self._perform_request(request, _extract_etag)
-
-    def delete_entity(self, table_name, partition_key, row_key,
-                      if_match='*', timeout=None):
-        '''
-        Deletes an existing entity in a table. Throws if the entity does not exist.
-
-        When an entity is successfully deleted, the entity is immediately marked 
-        for deletion and is no longer accessible to clients. The entity is later 
-        removed from the Table service during garbage collection.
-
-        :param str table_name:
-            The name of the table containing the entity to delete.
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The delete operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional delete, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _delete_entity(partition_key, row_key, if_match)
-        request.host_locations = self._get_host_locations()
-        request.query['timeout'] = _int_to_str(timeout)
-        request.path = _get_entity_path(table_name, partition_key, row_key)
-
-        self._perform_request(request)
-
-    def insert_or_replace_entity(self, table_name, entity, timeout=None):
-        '''
-        Replaces an existing entity or inserts a new entity if it does not
-        exist in the table. Because this operation can insert or update an
-        entity, it is also known as an "upsert" operation.
-
-        If insert_or_replace_entity is used to replace an entity, any properties 
-        from the previous entity will be removed if the new entity does not define 
-        them.
-
-        :param str table_name:
-            The name of the table in which to insert or replace the entity.
-        :param entity:
-            The entity to insert or replace. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _insert_or_replace_entity(entity, self.require_encryption, self.key_encryption_key,
-                                            self.encryption_resolver_function)
-        request.host_locations = self._get_host_locations()
-        request.query['timeout'] = _int_to_str(timeout)
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        return self._perform_request(request, _extract_etag)
-
-    def insert_or_merge_entity(self, table_name, entity, timeout=None):
-        '''
-        Merges an existing entity or inserts a new entity if it does not exist
-        in the table. 
-
-        If insert_or_merge_entity is used to merge an entity, any properties from 
-        the previous entity will be retained if the request does not define or 
-        include them.
-
-        :param str table_name:
-            The name of the table in which to insert or merge the entity.
-        :param entity:
-            The entity to insert or merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-
-        _validate_not_none('table_name', table_name)
-        request = _insert_or_merge_entity(entity, self.require_encryption,
-                                          self.key_encryption_key)
-        request.host_locations = self._get_host_locations()
-        request.query['timeout'] = _int_to_str(timeout)
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        return self._perform_request(request, _extract_etag)
-
-    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
-        _update_storage_table_header(request)
-        return super(TableService, self)._perform_request(request, parser, parser_args, operation_context)
diff -pruN 1.4.0-1/azure/multiapi/storage/__init__.py 1.5.0-1/azure/multiapi/storage/__init__.py
--- 1.4.0-1/azure/multiapi/storage/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-﻿__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    X_MS_VERSION,
-)
-
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-)
-
-from .cloudstorageaccount import CloudStorageAccount
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_auth.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_auth.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_auth.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_auth.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,120 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from ._common_conversion import (
-    _sign_string,
-)
-
-
-class _StorageSharedKeyAuthentication(object):
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.headers if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = request.path.split('?')[0]
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.headers:
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        signature = _sign_string(self.account_key, string_to_sign)
-        auth_string = 'SharedKey ' + self.account_name + ':' + signature
-        request.headers.append(('Authorization', auth_string))
-
-
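-# Note: the definition below shadows the base class of the same name above;
-# this is legal Python (the name in the bases list still resolves to the
-# earlier definition), and _StorageTableSharedKeyAuthentication further down
-# inherits from this extended class.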
-class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        request.query.sort()
-
-        string_to_sign = ''
-        for name, value in request.query:
-            if value:
-                string_to_sign += '\n' + name.lower() + ':' + value
-
-        return string_to_sign
-
-
-class _StorageTableSharedKeyAuthentication(_StorageSharedKeyAuthentication):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                ['content-md5', 'content-type', 'x-ms-date'],
-            ) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        for name, value in request.query:
-            if name == 'comp':
-                return '?comp=' + value
-        return ''
-
-
-class _StorageNoAuthentication(object):
-    def sign_request(self, request):
-        pass
-
-
-class _StorageSASAuthentication(object):
-    def __init__(self, sas_token):
-        self.sas_token = sas_token
-
-    def sign_request(self, request):
-        if '?' in request.path:
-            request.path += '&'
-        else:
-            request.path += '?'
-
-        request.path += self.sas_token
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_common_conversion.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_common_conversion.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_common_conversion.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_common_conversion.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,103 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-import sys
-from dateutil.tz import tzutc
-
-from .models import (
-    _unicode_type,
-)
-
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-def _int_to_str(value):
-    return str(int(value)) if value is not None else None
-
-def _bool_to_str(value):
-    if value is None:
-        return None
-
-    if isinstance(value, bool):
-        if value:
-            return 'true'
-        else:
-            return 'false'
-
-    return str(value)
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-def _datetime_to_utc_string(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value is None:
-        return None
-
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-
-    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-def _encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def _decode_base64_to_bytes(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def _decode_base64_to_text(data):
-    decoded_bytes = _decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def _sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = _decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, _unicode_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, _unicode_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = _encode_base64(digest)
-    return encoded_digest
-
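-# Worked sketch of _sign_string (key and payload are illustrative): the key is
-# base64-decoded by default, HMAC-SHA256 is computed over the UTF-8 encoded
-# string to sign, and the digest is returned base64-encoded:
-#
-#     _sign_string('a2V5', 'GET\n/account/mytable')
-#     # == base64(HMAC_SHA256(b'key', b'GET\n/account/mytable'))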
-
-def _lower(text):
-    return text.lower()
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_connection.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_connection.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_connection.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_connection.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,141 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import os
-import sys
-if sys.version_info >= (3,):
-    from urllib.parse import urlparse
-else:
-    from urlparse import urlparse
-
-from ._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_KEY,
-    DEV_BLOB_HOST,
-    DEV_QUEUE_HOST,
-    DEV_TABLE_HOST
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-_EMULATOR_ENDPOINTS = {
-   'blob': DEV_BLOB_HOST,
-   'queue': DEV_QUEUE_HOST,
-   'table': DEV_TABLE_HOST,
-   'file': '',
-}
-
-_CONNECTION_ENDPOINTS = {
-    'blob': 'BlobEndpoint',
-    'queue': 'QueueEndpoint',
-    'table': 'TableEndpoint',
-    'file': 'FileEndpoint',
-}
-
-class _ServiceParameters(object):
-    def __init__(self, service, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 custom_domain=None):
-
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.protocol = protocol or DEFAULT_PROTOCOL
-
-        if is_emulated:
-            self.account_name = DEV_ACCOUNT_NAME
-            self.protocol = 'http'
-
-            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
-            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
-
-            self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-            self.secondary_endpoint = '{}/{}-secondary'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-        else:
-            # Strip whitespace from the key
-            if self.account_key:
-                self.account_key = self.account_key.strip()
-
-            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
-
-            # Setup the primary endpoint
-            if custom_domain:
-                parsed_url = urlparse(custom_domain)
-                self.primary_endpoint = parsed_url.netloc + parsed_url.path
-                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
-            else:
-                if not self.account_name:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)         
-                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
-            
-            # Setup the secondary endpoint
-            if self.account_name:
-                self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
-            else:
-                self.secondary_endpoint = None
-
-    @staticmethod
-    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, is_emulated=None, 
-                 protocol=None, endpoint_suffix=None, custom_domain=None, request_session=None, 
-                 connection_string=None):
-        if connection_string:
-            params = _ServiceParameters._from_connection_string(connection_string, service)
-        elif is_emulated:
-            params = _ServiceParameters(service, is_emulated=True)
-        elif account_name:
-            params = _ServiceParameters(service,
-                                      account_name=account_name, 
-                                      account_key=account_key, 
-                                      sas_token=sas_token, 
-                                      is_emulated=is_emulated, 
-                                      protocol=protocol, 
-                                      endpoint_suffix=endpoint_suffix,
-                                      custom_domain=custom_domain)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        params.request_session = request_session
-        return params
-
-    @staticmethod
-    def _from_connection_string(connection_string, service):
-        # Split into key=value pairs removing empties, then split the pairs into a dict
-        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
-
-        # Authentication
-        account_name = config.get('AccountName')
-        account_key = config.get('AccountKey')
-        sas_token = config.get('SharedAccessSignature')
-
-        # Emulator
-        is_emulated = config.get('UseDevelopmentStorage')
-
-        # Basic URL Configuration
-        protocol = config.get('DefaultEndpointsProtocol')
-        endpoint_suffix = config.get('EndpointSuffix')
-
-        # Custom URLs
-        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
-
-        return _ServiceParameters(service,
-                                  account_name=account_name, 
-                                  account_key=account_key, 
-                                  sas_token=sas_token, 
-                                  is_emulated=is_emulated, 
-                                  protocol=protocol, 
-                                  endpoint_suffix=endpoint_suffix,
-                                  custom_domain=endpoint)
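
_from_connection_string above reduces a connection string to a dict of key=value pairs before mapping the well-known keys onto constructor arguments. A standalone sketch of that parsing step (the sample connection string is made up):

    def parse_connection_string(connection_string):
        # Split on ';', drop empties, then split each pair on the first
        # '=' only: account keys are base64 and may end in '=' padding.
        return dict(s.split('=', 1) for s in connection_string.split(';') if s)

    conn = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
            'AccountKey=abc123==;EndpointSuffix=core.windows.net')
    config = parse_connection_string(conn)
    assert config['AccountName'] == 'myaccount'
    assert config['AccountKey'] == 'abc123=='
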
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_constants.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,40 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import platform
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.30.0'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2015-04-05'
-
-# UserAgent string sample: 'Azure-Storage/0.30.0 (Python CPython 3.4.2; Windows 8)'
-_USER_AGENT_STRING = 'Azure-Storage/{} (Python {} {}; {} {})'.format(__version__, platform.python_implementation(), platform.python_version(), platform.system(), platform.release())
-
-# Live ServiceClient URLs
-SERVICE_HOST_BASE = 'core.windows.net'
-DEFAULT_PROTOCOL = 'https'
-
-# Development ServiceClient URLs
-DEV_BLOB_HOST = '127.0.0.1:10000'
-DEV_QUEUE_HOST = '127.0.0.1:10001'
-DEV_TABLE_HOST = '127.0.0.1:10002'
-
-# Default credentials for Development Storage Service
-DEV_ACCOUNT_NAME = 'devstoreaccount1'
-DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
-
-# Socket timeout in seconds is 5 min * 60 seconds
-_SOCKET_TIMEOUT = 5 * 60
\ No newline at end of file
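
Given these constants, a live primary endpoint is assembled as <account>.<service>.<endpoint_suffix>, while the emulator variant is <host>/<account>. A quick sketch of both layouts (the account name is illustrative):

    SERVICE_HOST_BASE = 'core.windows.net'
    DEV_BLOB_HOST = '127.0.0.1:10000'
    DEV_ACCOUNT_NAME = 'devstoreaccount1'

    # Live endpoint: the account name is a subdomain of the service host.
    primary = '{}.{}.{}'.format('myaccount', 'blob', SERVICE_HOST_BASE)
    assert primary == 'myaccount.blob.core.windows.net'

    # Emulator endpoint: the account name is a path under a local port.
    emulated = '{}/{}'.format(DEV_BLOB_HOST, DEV_ACCOUNT_NAME)
    assert emulated == '127.0.0.1:10000/devstoreaccount1'
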
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,288 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from dateutil import parser
-from ._common_conversion import _to_str
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    ServiceProperties,
-    Logging,
-    Metrics,
-    CorsRule,
-    AccessPolicy,
-    _HeaderDict,
-    _dict,
-)
-
-def _int_to_str(value):
-    return value if value is None else int(value)
-
-def _get_download_size(start_range, end_range, resource_size):
-    if start_range is not None:
-        end_range = end_range if end_range else (resource_size if resource_size else None)
-        if end_range is not None:
-            return end_range - start_range
-        else:
-            return None
-    else:
-        return resource_size
-
-GET_PROPERTIES_ATTRIBUTE_MAP = {
-    'last-modified': (None, 'last_modified', parser.parse),
-    'etag': (None, 'etag', _to_str),
-    'x-ms-blob-type': (None, 'blob_type', _to_str),
-    'content-length': (None, 'content_length', _int_to_str),
-    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _int_to_str),
-    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _int_to_str),
-    'x-ms-share-quota': (None, 'quota', _int_to_str),
-    'content-type': ('content_settings', 'content_type', _to_str),
-    'cache-control': ('content_settings', 'cache_control', _to_str),
-    'content-encoding': ('content_settings', 'content_encoding', _to_str),
-    'content-disposition': ('content_settings', 'content_disposition', _to_str),
-    'content-language': ('content_settings', 'content_language', _to_str),
-    'content-md5': ('content_settings', 'content_md5', _to_str),
-    'x-ms-lease-status': ('lease', 'status', _to_str),
-    'x-ms-lease-state': ('lease', 'state', _to_str),
-    'x-ms-lease-duration': ('lease', 'duration', _to_str),
-    'x-ms-copy-id': ('copy', 'id', _to_str),
-    'x-ms-copy-source': ('copy', 'source', _to_str),
-    'x-ms-copy-status': ('copy', 'status', _to_str),
-    'x-ms-copy-progress': ('copy', 'progress', _to_str),
-    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
-    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
-}
-
-def _parse_metadata(response):
-    '''
-    Extracts out resource metadata information.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    metadata = _dict()
-    for key, value in response.headers:
-        if key.startswith('x-ms-meta-'):
-            metadata[key[10:]] = _to_str(value)
-
-    return metadata
-
-def _parse_properties(response, result_class):
-    '''
-    Extracts out resource properties and metadata information.
-    Ignores the standard http headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    props = result_class()
-    for key, value in response.headers:
-        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
-        if info:
-            if info[0] is None:
-                setattr(props, info[1], info[2](value))
-            else:
-                attr = getattr(props, info[0])
-                setattr(attr, info[1], info[2](value))
-
-    return props
-
-def _parse_response_for_dict(response):
-    ''' Extracts name-values from response header. Filter out the standard
-    http headers.'''
-
-    if response is None:
-        return None
-    http_headers = ['server', 'date', 'location', 'host',
-                    'via', 'proxy-connection', 'connection']
-    return_dict = _HeaderDict()
-    if response.headers:
-        for name, value in response.headers:
-            if not name.lower() in http_headers:
-                return_dict[name] = value
-
-    return return_dict
-
-def _convert_xml_to_signed_identifiers(xml):
-    list_element = ETree.fromstring(xml)
-    signed_identifiers = _dict()
-
-    for signed_identifier_element in list_element.findall('SignedIdentifier'):
-        # Id element
-        id = signed_identifier_element.find('Id').text
-
-        # Access policy element
-        access_policy = AccessPolicy()
-        access_policy_element = signed_identifier_element.find('AccessPolicy')
-
-        start_element = access_policy_element.find('Start')
-        if start_element is not None:
-            access_policy.start = parser.parse(start_element.text)
-
-        expiry_element = access_policy_element.find('Expiry')
-        if expiry_element is not None:
-            access_policy.expiry = parser.parse(expiry_element.text)
-
-        access_policy.permission = access_policy_element.findtext('Permission')
-
-        signed_identifiers[id] = access_policy
-
-    return signed_identifiers
-
-def _convert_xml_to_service_properties(xml):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.fromstring(xml)
-    service_properties = ServiceProperties()
-    
-    # Logging
-    logging = service_properties_element.find('Logging')
-    if logging is not None:
-        service_properties.logging = Logging()
-        service_properties.logging.version = logging.find('Version').text
-        service_properties.logging.delete = _bool(logging.find('Delete').text)
-        service_properties.logging.read = _bool(logging.find('Read').text)
-        service_properties.logging.write = _bool(logging.find('Write').text)
-
-        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'), 
-                                            service_properties.logging.retention_policy)
-    # HourMetrics
-    hour_metrics_element = service_properties_element.find('HourMetrics')
-    if hour_metrics_element is not None:
-        service_properties.hour_metrics = Metrics()
-        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
-
-    # MinuteMetrics
-    minute_metrics_element = service_properties_element.find('MinuteMetrics')
-    if minute_metrics_element is not None:
-        service_properties.minute_metrics = Metrics()
-        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
-
-    # CORS
-    cors = service_properties_element.find('Cors')
-    if cors is not None:
-        service_properties.cors = list()
-        for rule in cors.findall('CorsRule'):
-            allowed_origins = rule.find('AllowedOrigins').text.split(',')
-
-            allowed_methods = rule.find('AllowedMethods').text.split(',')
-
-            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
-
-            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
-
-            exposed_headers = rule.find('ExposedHeaders').text
-            if exposed_headers is not None:
-                cors_rule.exposed_headers = exposed_headers.split(',')
-
-            allowed_headers = rule.find('AllowedHeaders').text
-            if allowed_headers is not None:
-                cors_rule.allowed_headers = allowed_headers.split(',')
-
-            service_properties.cors.append(cors_rule)
-
-    # Target version
-    target_version = service_properties_element.find('DefaultServiceVersion')
-    if target_version is not None:
-        service_properties.target_version = target_version.text
-
-    return service_properties
-
-
-def _convert_xml_to_metrics(xml, metrics):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    metrics.version = xml.find('Version').text
-
-    # Enabled
-    metrics.enabled = _bool(xml.find('Enabled').text)
-
-    # IncludeAPIs
-    include_apis_element = xml.find('IncludeAPIs')
-    if include_apis_element is not None:
-        metrics.include_apis = _bool(include_apis_element.text)
-
-    # RetentionPolicy
-    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
-
-
-def _convert_xml_to_retention_policy(xml, retention_policy):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    retention_policy.enabled = _bool(xml.find('Enabled').text)
-
-    # Days
-    days_element =  xml.find('Days')
-    if days_element is not None:
-        retention_policy.days = int(days_element.text)
-
-
-def _bool(value):
-    return value.lower() == 'true'
\ No newline at end of file
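
_convert_xml_to_signed_identifiers above walks a SignedIdentifiers document with ElementTree; the same traversal against a hand-written sample looks roughly like this (the XML values are illustrative):

    from xml.etree import ElementTree as ETree

    xml = '''<SignedIdentifiers>
      <SignedIdentifier>
        <Id>policy-1</Id>
        <AccessPolicy>
          <Start>2015-04-05T00:00:00Z</Start>
          <Expiry>2015-04-06T00:00:00Z</Expiry>
          <Permission>r</Permission>
        </AccessPolicy>
      </SignedIdentifier>
    </SignedIdentifiers>'''

    policies = {}
    for si in ETree.fromstring(xml).findall('SignedIdentifier'):
        policy = si.find('AccessPolicy')
        # findtext returns None for absent elements, matching the
        # optional Start/Expiry handling in the deleted code.
        policies[si.find('Id').text] = (policy.findtext('Start'),
                                        policy.findtext('Expiry'),
                                        policy.findtext('Permission'))
    assert policies['policy-1'][2] == 'r'
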
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_error.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,81 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-from azure.common import (
-    AzureHttpError,
-    AzureConflictHttpError,
-    AzureMissingResourceHttpError,
-)
-
-_ERROR_CONFLICT = 'Conflict ({0})'
-_ERROR_NOT_FOUND = 'Not found ({0})'
-_ERROR_UNKNOWN = 'Unknown error ({0})'
-_ERROR_STORAGE_MISSING_INFO = \
-    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
-_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
-    'The emulator does not support the file service.'
-_ERROR_ACCESS_POLICY = \
-    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
-    'instance'
-_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
-_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
-_ERROR_VALUE_NONE = '{0} should not be None.'
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
-_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use {0} chunk downloader more than 1 thread must be ' + \
-    'used since get_{0}_to_bytes should be called for single threaded ' + \
-    '{0} downloads.'
-_ERROR_START_END_NEEDED_FOR_MD5 = \
-    'Both end_range and start_range need to be specified ' + \
-    'for getting content MD5.'
-_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
-    'Getting content MD5 for a range greater than 4MB ' + \
-    'is not supported.'
-
-def _dont_fail_on_exist(error):
-    ''' don't throw exception if the resource exists.
-    This is called by create_* APIs with fail_on_exist=False'''
-    if isinstance(error, AzureConflictHttpError):
-        return False
-    else:
-        raise error
-
-
-def _dont_fail_not_exist(error):
-    ''' don't throw exception if the resource doesn't exist.
-    This is called by delete_* APIs with fail_not_exist=False'''
-    if isinstance(error, AzureMissingResourceHttpError):
-        return False
-    else:
-        raise error
-
-
-def _general_error_handler(http_error):
-    ''' Simple error handler for azure.'''
-    message = str(http_error)
-    if http_error.respbody is not None:
-        message += '\n' + http_error.respbody.decode('utf-8-sig')
-    raise AzureHttpError(message, http_error.status)
-
-
-def _validate_type_bytes(param_name, param):
-    if not isinstance(param, bytes):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
\ No newline at end of file
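
The _dont_fail_* handlers above turn an expected HTTP error into a False return instead of an exception. A hedged sketch of the calling pattern, with create_container standing in for any create_* API (the wrapper and service object are hypothetical):

    from azure.common import AzureConflictHttpError

    def _dont_fail_on_exist(error):
        # Swallow 409 Conflict ("already exists"); re-raise anything else.
        if isinstance(error, AzureConflictHttpError):
            return False
        raise error

    def create_container_safe(service, name):
        try:
            service.create_container(name)  # hypothetical create_* call
            return True
        except AzureConflictHttpError as error:
            return _dont_fail_on_exist(error)
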
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,79 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-class HTTPError(Exception):
-
-    ''' HTTP Exception when response status code >= 300 '''
-
-    def __init__(self, status, message, respheader, respbody):
-        '''Creates a new HTTPError with the specified status, message,
-        response headers and body'''
-        self.status = status
-        self.respheader = respheader
-        self.respbody = respbody
-        Exception.__init__(self, message)
-
-
-class HTTPResponse(object):
-
-    """Represents a response from an HTTP request.  An HTTPResponse has the
-    following attributes:
-    
-    status:
-        the status code of the response
-    message:
-        the message
-    headers:
-        the returned headers, as a list of (name, value) pairs
-    body:
-        the body of the response
-    """
-
-    def __init__(self, status, message, headers, body):
-        self.status = status
-        self.message = message
-        self.headers = headers
-        self.body = body
-
-
-class HTTPRequest(object):
-
-    '''Represents an HTTP Request.  An HTTP Request consists of the following
-    attributes:
-    host:
-        the host name to connect to
-    method:
-        the method to use to connect (string such as GET, POST, PUT, etc.)
-    path:
-        the uri fragment
-    query:
-        query parameters specified as a list of (name, value) pairs
-    headers:
-        header values specified as (name, value) pairs
-    body:
-        the body of the request.
-    protocol_override:
-        specify to use this protocol instead of the global one stored in
-        _HTTPClient.
-    '''
-
-    def __init__(self):
-        self.host = ''
-        self.method = ''
-        self.path = ''
-        self.query = []      # list of (name, value)
-        self.headers = []    # list of (header name, header value)
-        self.body = ''
-        self.protocol_override = None
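
These are plain value objects: a request is assembled field by field, then signed and sent by the client. A minimal illustration using a stand-in class with the same fields (values are illustrative):

    class HTTPRequest(object):
        # Stand-in mirroring the deleted class's fields.
        def __init__(self):
            self.host = ''
            self.method = ''
            self.path = ''
            self.query = []    # list of (name, value)
            self.headers = []  # list of (name, value)
            self.body = ''

    request = HTTPRequest()
    request.method = 'GET'
    request.host = 'myaccount.blob.core.windows.net'
    request.path = '/mycontainer'
    request.query = [('restype', 'container'), ('comp', 'list')]
    request.headers = [('x-ms-version', '2015-04-05')]
    # The client later folds query back into the path and signs the headers.
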
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/batchclient.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/batchclient.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/batchclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/batchclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,351 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-import uuid
-
-from azure.common import (
-    AzureHttpError,
-)
-from ..models import (
-    AzureBatchOperationError,
-    AzureBatchValidationError,
-)
-from .._common_error import (
-    _ERROR_CANNOT_FIND_PARTITION_KEY,
-    _ERROR_CANNOT_FIND_ROW_KEY,
-    _ERROR_INCORRECT_TABLE_IN_BATCH,
-    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
-    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
-    _ERROR_BATCH_COMMIT_FAIL,
-)
-from .._common_serialization import (
-    ETree,
-    url_unquote,
-    _get_etree_text,
-    _etree_entity_feed_namespaces,
-    _update_request_uri_query,
-)
-from ..table._serialization import (
-    _update_storage_table_header,
-)
-from . import HTTPError, HTTPRequest, HTTPResponse
-from .httpclient import _HTTPClient
-
-_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
-
-if sys.version_info < (3,):
-    def _new_boundary():
-        return str(uuid.uuid1())
-else:
-    def _new_boundary():
-        return str(uuid.uuid1()).encode('utf-8')
-
-
-class _BatchClient(_HTTPClient):
-
-    '''
-    The class used for batch operations against the storage table
-    service. It only supports one changeset.
-    '''
-
-    def __init__(self, service_instance, authentication,
-                 protocol='http', request_session=None, timeout=65, user_agent=''):
-        _HTTPClient.__init__(self, service_instance, protocol=protocol, request_session=request_session, timeout=timeout, user_agent=user_agent)
-        self.authentication = authentication
-        self.is_batch = False
-        self.batch_requests = []
-        self.batch_table = ''
-        self.batch_partition_key = ''
-        self.batch_row_keys = []
-
-    def get_request_table(self, request):
-        '''
-        Extracts table name from request.uri. The request.uri has either
-        "/mytable(...)" or "/mytable" format.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if '(' in request.path:
-            pos = request.path.find('(')
-            return request.path[1:pos]
-        else:
-            return request.path[1:]
-
-    def get_request_partition_key(self, request):
-        '''
-        Extracts PartitionKey from request.body if it is a POST request or from
-        request.path if it is not a POST request. Only insert operation request
-        is a POST request and the PartitionKey is in the request body.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if request.method == 'POST':
-            doc = ETree.fromstring(request.body)
-            part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
-            if part_key is None:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
-            return _get_etree_text(part_key)
-        else:
-            uri = url_unquote(request.path)
-            pos1 = uri.find('PartitionKey=\'')
-            pos2 = uri.find('\',', pos1)
-            if pos1 == -1 or pos2 == -1:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
-            return uri[pos1 + len('PartitionKey=\''):pos2]
-
-    def get_request_row_key(self, request):
-        '''
-        Extracts RowKey from request.body if it is a POST request or from
-        request.path if it is not a POST request. Only insert operation request
-        is a POST request and the RowKey is in the request body.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if request.method == 'POST':
-            doc = ETree.fromstring(request.body)
-            row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
-            if row_key is None:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
-            return _get_etree_text(row_key)
-        else:
-            uri = url_unquote(request.path)
-            pos1 = uri.find('RowKey=\'')
-            pos2 = uri.find('\')', pos1)
-            if pos1 == -1 or pos2 == -1:
-                raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
-            row_key = uri[pos1 + len('RowKey=\''):pos2]
-            return row_key
-
-    def validate_request_table(self, request):
-        '''
-        Validates that all requests have the same table name. Set the table
-        name if it is the first request for the batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if self.batch_table:
-            if self.get_request_table(request) != self.batch_table:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
-        else:
-            self.batch_table = self.get_request_table(request)
-
-    def validate_request_partition_key(self, request):
-        '''
-        Validates that all requests have the same PartitionKey. Set the
-        PartitionKey if it is the first request for the batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        if self.batch_partition_key:
-            if self.get_request_partition_key(request) != \
-                self.batch_partition_key:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
-        else:
-            self.batch_partition_key = self.get_request_partition_key(request)
-
-    def validate_request_row_key(self, request):
-        '''
-        Validates that all requests have different RowKeys and adds each
-        RowKey to the existing RowKey list.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        row_key = self.get_request_row_key(request)
-        if row_key in self.batch_row_keys:
-            raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
-        # Track every RowKey so later duplicates within the batch are caught.
-        self.batch_row_keys.append(row_key)
-
-    def begin_batch(self):
-        '''
-        Starts the batch operation. Initializes the batch variables:
-
-        is_batch:
-            batch operation flag.
-        batch_table:
-            the table name of the batch operation
-        batch_partition_key:
-            the PartitionKey of the batch requests.
-        batch_row_keys:
-            the RowKey list of adding requests.
-        batch_requests:
-            the list of the requests.
-        '''
-        self.is_batch = True
-        self.batch_table = ''
-        self.batch_partition_key = ''
-        self.batch_row_keys = []
-        self.batch_requests = []
-
-    def insert_request_to_batch(self, request):
-        '''
-        Adds request to batch operation.
-
-        request:
-            the request to insert, update or delete entity
-        '''
-        self.validate_request_table(request)
-        self.validate_request_partition_key(request)
-        self.validate_request_row_key(request)
-        self.batch_requests.append(request)
-
-    def commit_batch(self):
-        ''' Resets batch flag and commits the batch requests. '''
-        if self.is_batch:
-            self.is_batch = False
-            self.commit_batch_requests()
-
-    def commit_batch_requests(self):
-        ''' Commits the batch requests. '''
-
-        batch_boundary = b'batch_' + _new_boundary()
-        changeset_boundary = b'changeset_' + _new_boundary()
-
-        # Commit the batch only if the requests list is not empty.
-        if self.batch_requests:
-            request = HTTPRequest()
-            request.method = 'POST'
-            request.host = self.batch_requests[0].host
-            request.path = '/$batch'
-            request.headers = [
-                ('Content-Type', 'multipart/mixed; boundary=' + \
-                    batch_boundary.decode('utf-8')),
-                ('Accept', 'application/atom+xml,application/xml'),
-                ('Accept-Charset', 'UTF-8')]
-
-            request.body = b'--' + batch_boundary + b'\n'
-            request.body += b'Content-Type: multipart/mixed; boundary='
-            request.body += changeset_boundary + b'\n\n'
-
-            content_id = 1
-
-            # Adds each request body to the POST data.
-            for batch_request in self.batch_requests:
-                request.body += b'--' + changeset_boundary + b'\n'
-                request.body += b'Content-Type: application/http\n'
-                request.body += b'Content-Transfer-Encoding: binary\n\n'
-                request.body += batch_request.method.encode('utf-8')
-                request.body += b' http://'
-                request.body += batch_request.host.encode('utf-8')
-                request.body += batch_request.path.encode('utf-8')
-                request.body += b' HTTP/1.1\n'
-                request.body += b'Content-ID: '
-                request.body += str(content_id).encode('utf-8') + b'\n'
-                content_id += 1
-
-                # Add different headers for different type requests.
-                if not batch_request.method == 'DELETE':
-                    request.body += \
-                        b'Content-Type: application/atom+xml;type=entry\n'
-                    for name, value in batch_request.headers:
-                        if name == 'If-Match':
-                            request.body += name.encode('utf-8') + b': '
-                            request.body += value.encode('utf-8') + b'\n'
-                            break
-                    request.body += b'Content-Length: '
-                    request.body += str(len(batch_request.body)).encode('utf-8')
-                    request.body += b'\n\n'
-                    request.body += batch_request.body + b'\n'
-                else:
-                    for name, value in batch_request.headers:
-                        # If-Match should be already included in
-                        # batch_request.headers, but in case it is missing,
-                        # just add it.
-                        if name == 'If-Match':
-                            request.body += name.encode('utf-8') + b': '
-                            request.body += value.encode('utf-8') + b'\n\n'
-                            break
-                    else:
-                        request.body += b'If-Match: *\n\n'
-
-            request.body += b'--' + changeset_boundary + b'--' + b'\n'
-            request.body += b'--' + batch_boundary + b'--'
-
-            request.path, request.query = _update_request_uri_query(request)
-            request.headers = _update_storage_table_header(request)
-            self.authentication.sign_request(request)
-
-            # Submit the whole request as batch request.
-            response = self.perform_request(request)
-            if response.status >= 300:
-                # This exception will be caught by the general error handler
-                # and raised as an azure http exception
-                raise HTTPError(response.status,
-                                _ERROR_BATCH_COMMIT_FAIL,
-                                response.headers,
-                                response.body)
-
-            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/
-            # The body of a ChangeSet response is either a response for all the
-            # successfully processed change request within the ChangeSet,
-            # formatted exactly as it would have appeared outside of a batch, 
-            # or a single response indicating a failure of the entire ChangeSet.
-            responses = self._parse_batch_response(response.body)
-            if responses and responses[0].status >= 300:
-                self._report_batch_error(responses[0])
-
-    def cancel_batch(self):
-        ''' Resets the batch flag. '''
-        self.is_batch = False
-
-    def _parse_batch_response(self, body):
-        parts = body.split(b'--changesetresponse_')
-
-        responses = []
-        for part in parts:
-            httpLocation = part.find(b'HTTP/')
-            if httpLocation > 0:
-                response = self._parse_batch_response_part(part[httpLocation:])
-                responses.append(response)
-
-        return responses
-
-    def _parse_batch_response_part(self, part):
-        lines = part.splitlines()
-
-        # First line is the HTTP status/reason
-        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
-
-        # Followed by headers and body
-        headers = []
-        body = b''
-        isBody = False
-        for line in lines[1:]:
-            if line == b'' and not isBody:
-                isBody = True
-            elif isBody:
-                body += line
-            else:
-                headerName, _, headerVal = line.partition(b':')
-                headers.append((headerName.lower(), headerVal))
-
-        return HTTPResponse(int(status), reason.strip(), headers, body)
-
-    def _report_batch_error(self, response):
-        doc = ETree.fromstring(response.body)
-
-        code_element = doc.find('./m:code', _etree_entity_feed_namespaces)
-        code = _get_etree_text(code_element) if code_element is not None else ''
-
-        message_element = doc.find('./m:message', _etree_entity_feed_namespaces)
-        message = _get_etree_text(message_element) if message_element is not None else ''
-
-        raise AzureBatchOperationError(message, response.status, code)
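
commit_batch_requests above serializes all queued requests into a single OData $batch POST: a multipart/mixed body whose one changeset carries each sub-request as an application/http part. A trimmed sketch of the wire format it builds (boundaries, host, and table name are illustrative):

    import uuid

    batch_boundary = 'batch_' + str(uuid.uuid1())
    changeset_boundary = 'changeset_' + str(uuid.uuid1())

    body = (
        '--{b}\n'
        'Content-Type: multipart/mixed; boundary={c}\n\n'
        '--{c}\n'
        'Content-Type: application/http\n'
        'Content-Transfer-Encoding: binary\n\n'
        'POST http://myaccount.table.core.windows.net/mytable HTTP/1.1\n'
        'Content-ID: 1\n'
        'Content-Type: application/atom+xml;type=entry\n'
        'Content-Length: 0\n\n\n'
        '--{c}--\n'
        '--{b}--'
    ).format(b=batch_boundary, c=changeset_boundary)
    print(body)
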
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/httpclient.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/httpclient.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/httpclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/httpclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,226 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import base64
-import sys
-
-if sys.version_info < (3,):
-    from httplib import (
-        HTTP_PORT,
-        HTTPS_PORT,
-        )
-    from urlparse import urlparse
-    from urllib2 import quote as url_quote
-else:
-    from http.client import (
-        HTTP_PORT,
-        HTTPS_PORT,
-        )
-    from urllib.parse import urlparse
-    from urllib.parse import quote as url_quote
-
-from . import HTTPError, HTTPResponse
-from .requestsclient import _RequestsConnection
-
-
-DEBUG_REQUESTS = False
-DEBUG_RESPONSES = False
-
-class _HTTPClient(object):
-
-    '''
-    Takes the request and sends it to cloud service and returns the response.
-    '''
-
-    def __init__(self, service_instance, cert_file=None, protocol='https',
-                 request_session=None, timeout=None, user_agent=''):
-        '''
-        service_instance:
-            service client instance.
-        cert_file:
-            certificate file name/location. This is only used in hosted
-            service management.
-        protocol:
-            http or https.
-        request_session:
-            session object created with requests library (or compatible).
-        timeout:
-            timeout for the http request, in seconds.
-        user_agent:
-            user agent string to set in http header.
-        '''
-        self.service_instance = service_instance
-        self.cert_file = cert_file
-        self.protocol = protocol
-        self.proxy_host = None
-        self.proxy_port = None
-        self.proxy_user = None
-        self.proxy_password = None
-        self.request_session = request_session
-        self.timeout = timeout
-        self.user_agent = user_agent
-
-    def set_proxy(self, host, port, user, password):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        host:
-            Address of the proxy. Ex: '192.168.0.100'
-        port:
-            Port of the proxy. Ex: 6000
-        user:
-            User for proxy authorization.
-        password:
-            Password for proxy authorization.
-        '''
-        self.proxy_host = host
-        self.proxy_port = port
-        self.proxy_user = user
-        self.proxy_password = password
-
-    def get_uri(self, request):
-        ''' Return the target uri for the request.'''
-        protocol = request.protocol_override \
-            if request.protocol_override else self.protocol
-        protocol = protocol.lower()
-        port = HTTP_PORT if protocol == 'http' else HTTPS_PORT
-        return protocol + '://' + request.host + ':' + str(port) + request.path
-
-    def get_connection(self, request):
-        ''' Create connection for the request. '''
-        protocol = request.protocol_override \
-            if request.protocol_override else self.protocol
-        protocol = protocol.lower()
-        target_host = request.host
-        target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT
-
-        connection = _RequestsConnection(
-            target_host, protocol, self.request_session, self.timeout)
-        proxy_host = self.proxy_host
-        proxy_port = self.proxy_port
-
-        if self.proxy_host:
-            headers = None
-            if self.proxy_user and self.proxy_password:
-                auth = base64.encodestring(
-                    "{0}:{1}".format(self.proxy_user, self.proxy_password).encode()).rstrip()
-                headers = {'Proxy-Authorization': 'Basic {0}'.format(auth.decode())}
-            connection.set_tunnel(proxy_host, int(proxy_port), headers)
-
-        return connection
-
-    def send_request_headers(self, connection, request_headers):
-        if self.proxy_host and self.request_session is None:
-            for i in connection._buffer:
-                if i.startswith(b"Host: "):
-                    connection._buffer.remove(i)
-            connection.putheader(
-                'Host', "{0}:{1}".format(connection._tunnel_host,
-                                            connection._tunnel_port))
-
-        for name, value in request_headers:
-            if value:
-                connection.putheader(name, value)
-
-        connection.putheader('User-Agent', self.user_agent)
-        connection.endheaders()
-
-    def send_request_body(self, connection, request_body):
-        if request_body:
-            assert isinstance(request_body, bytes)
-            connection.send(request_body)
-        else:
-            connection.send(None)
-
-    def _update_request_uri_query(self, request):
-        '''pulls the query string out of the URI and moves it into
-        the query portion of the request object.  If there are already
-        query parameters on the request the parameters in the URI will
-        appear after the existing parameters'''
-
-        if '?' in request.path:
-            request.path, _, query_string = request.path.partition('?')
-            if query_string:
-                query_params = query_string.split('&')
-                for query in query_params:
-                    if '=' in query:
-                        name, _, value = query.partition('=')
-                        request.query.append((name, value))
-
-        request.path = url_quote(request.path, '/()$=\',')
-
-        # add encoded queries to request.path.
-        if request.query:
-            request.path += '?'
-            for name, value in request.query:
-                if value is not None:
-                    request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
-            request.path = request.path[:-1]
-
-        return request.path, request.query
-
-    def perform_request(self, request):
-        ''' Sends request to cloud service server and return the response. '''
-        connection = self.get_connection(request)
-        try:
-            connection.putrequest(request.method, request.path)
-
-            self.send_request_headers(connection, request.headers)
-            self.send_request_body(connection, request.body)
-
-            if DEBUG_REQUESTS and request.body:
-                print('request:')
-                try:
-                    print(request.body)
-                except:
-                    pass
-
-            resp = connection.getresponse()
-            status = int(resp.status)
-            message = resp.reason
-            respheaders = resp.getheaders()
-
-            # for consistency across platforms, make header names lowercase
-            for i, value in enumerate(respheaders):
-                respheaders[i] = (value[0].lower(), value[1])
-
-            respbody = None
-            if resp.length is None:
-                respbody = resp.read()
-            elif resp.length > 0:
-                respbody = resp.read(resp.length)
-
-            if DEBUG_RESPONSES and respbody:
-                print('response:')
-                try:
-                    print(respbody)
-                except:
-                    pass
-
-            response = HTTPResponse(
-                status, resp.reason, respheaders, respbody)
-            if status == 307:
-                new_url = urlparse(dict(respheaders)['location'])
-                request.host = new_url.hostname
-                request.path = new_url.path
-                request.path, request.query = self._update_request_uri_query(request)
-                return self.perform_request(request)
-            if status >= 300:
-                # This exception will be caught by the general error handler
-                # and raised as an azure http exception
-                raise HTTPError(status, message, respheaders, respbody)
-
-            return response
-        finally:
-            connection.close()
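
get_connection above builds its Proxy-Authorization header with base64.encodestring, which was deprecated and removed in Python 3.9; the equivalent header with the current API looks like this (the credentials are placeholders):

    import base64

    user, password = 'proxyuser', 'proxypass'
    token = base64.b64encode('{0}:{1}'.format(user, password).encode()).decode()
    headers = {'Proxy-Authorization': 'Basic {0}'.format(token)}
    assert headers['Proxy-Authorization'] == 'Basic cHJveHl1c2VyOnByb3h5cGFzcw=='
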
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/requestsclient.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/requestsclient.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_http/requestsclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_http/requestsclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-class _Response(object):
-
-    ''' Response class corresponding to the response returned from httplib
-    HTTPConnection. '''
-
-    def __init__(self, response):
-        self.status = response.status_code
-        self.reason = response.reason
-        self.respbody = response.content
-        self.length = len(response.content)
-        self.headers = []
-        for key, name in response.headers.items():
-            self.headers.append((key.lower(), name))
-
-    def getheaders(self):
-        '''Returns response headers.'''
-        return self.headers
-
-    def read(self, _length):
-        '''Returns response body. '''
-        return self.respbody[:_length]
-
-
-class _RequestsConnection(object):
-
-    def __init__(self, host, protocol, session, timeout):
-        self.host = host
-        self.protocol = protocol
-        self.session = session
-        self.headers = {}
-        self.method = None
-        self.body = None
-        self.response = None
-        self.uri = None
-        self.timeout = timeout
-
-        # By default, requests adds an Accept:*/* to the session, which causes
-        # issues with some Azure REST APIs. Removing it here gives us the flexibility
-        # to add it back on a case by case basis via putheader.
-        if 'Accept' in self.session.headers:
-            del self.session.headers['Accept']
-
-    def close(self):
-        pass
-
-    def set_tunnel(self, host, port=None, headers=None):
-        self.session.proxies['http'] = 'http://{}:{}'.format(host, port)
-        self.session.proxies['https'] = 'https://{}:{}'.format(host, port)
-        if headers:
-            self.session.headers.update(headers)
-
-    def set_proxy_credentials(self, user, password):
-        pass
-
-    def putrequest(self, method, uri):
-        self.method = method
-        self.uri = self.protocol + '://' + self.host + uri
-
-    def putheader(self, name, value):
-        self.headers[name] = value
-
-    def endheaders(self):
-        pass
-
-    def send(self, request_body):
-        self.response = self.session.request(self.method, self.uri, data=request_body, headers=self.headers, timeout=self.timeout)
-
-    def getresponse(self):
-        return _Response(self.response)
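
_RequestsConnection adapts a requests.Session to the httplib-style putrequest/putheader/send/getresponse protocol that httpclient.py expects, buffering state until send issues the real HTTP call. A compressed sketch of that adapter shape (a toy that omits proxying and the Accept-header tweak):

    import requests

    class SessionConnection(object):
        '''httplib-flavored facade over requests.Session (illustrative).'''

        def __init__(self, host, protocol='https', timeout=65):
            self.session = requests.Session()
            self.base = protocol + '://' + host
            self.timeout = timeout
            self.method = None
            self.uri = None
            self.headers = {}
            self.response = None

        def putrequest(self, method, path):
            self.method = method
            self.uri = self.base + path

        def putheader(self, name, value):
            self.headers[name] = value

        def send(self, body):
            # The buffered method/uri/headers become one real request here.
            self.response = self.session.request(
                self.method, self.uri, data=body,
                headers=self.headers, timeout=self.timeout)

        def getresponse(self):
            return self.response
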
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/_serialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,304 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-from datetime import date
-from dateutil.tz import tzutc
-from time import time
-from wsgiref.handlers import format_date_time
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-    from urllib.parse import quote as url_quote
-else:
-    from cStringIO import StringIO as BytesIO
-    from urllib2 import quote as url_quote   
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ._error import (
-    _general_error_handler,
-    _ERROR_VALUE_SHOULD_BE_BYTES,
-)
-from ._constants import (
-    X_MS_VERSION,
-)
-from .models import (
-    _unicode_type,
-)
-from ._common_conversion import (
-    _str,
-)
-
-def _to_utc_datetime(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-def _update_request(request):
-    # Verify body
-    if request.body:
-        assert isinstance(request.body, bytes)
-
-    # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
-    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
-        request.headers.append(('Content-Length', str(len(request.body))))
-
-    # append additional headers based on the service
-    current_time = format_date_time(time())
-    request.headers.append(('x-ms-date', current_time))
-    request.headers.append(('x-ms-version', X_MS_VERSION))
-    request.headers.append(('Accept-Encoding', 'identity'))
-
-    # append x-ms-meta name, values to header
-    for name, value in request.headers:
-        if 'x-ms-meta-name-values' in name and value:
-            for meta_name, meta_value in value.items():
-                request.headers.append(('x-ms-meta-' + meta_name, meta_value))
-            request.headers.remove((name, value))
-            break
-
-    # If the host has a path component (ex local storage), move it
-    path = request.host.split('/', 1)
-    if len(path) == 2:
-        request.host = path[0]
-        request.path = '/{}{}'.format(path[1], request.path)
-
-    # Encode and optionally add local storage prefix to path
-    request.path = url_quote(request.path, '/()$=\',~')
-
-    # Add query params to path
-    if request.query:
-        request.path += '?'
-        for name, value in request.query:
-            if value is not None:
-                request.path += name + '=' + url_quote(value, '~') + '&'
-        request.path = request.path[:-1]
-
-
-def _get_request_body_bytes_only(param_name, param_value):
-    '''Validates the request body passed in and converts it to bytes
-    if our policy allows it.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _get_request_body(request_body):
-    '''Converts an object into a request body.  If it's None
-    we'll return an empty string, if it's one of our objects it'll
-    convert it to XML and return it.  Otherwise we just use the object
-    directly'''
-    if request_body is None:
-        return b''
-
-    if isinstance(request_body, bytes):
-        return request_body
-
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    request_body = str(request_body)
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    return request_body
-
-def _storage_error_handler(http_error):
-    ''' Simple error handler for storage service. '''
-    return _general_error_handler(http_error)
-
-
-def _convert_signed_identifiers_to_xml(signed_identifiers):
-    if signed_identifiers is None:
-        return ''
-
-    sis = ETree.Element('SignedIdentifiers')
-    for id, access_policy in signed_identifiers.items():
-        # Per-identifier SignedIdentifier element
-        si = ETree.SubElement(sis, 'SignedIdentifier')
-
-        # Id element
-        ETree.SubElement(si, 'Id').text = id
-
-        # Access policy element
-        policy = ETree.SubElement(si, 'AccessPolicy')
-
-        if access_policy.start:
-            start = access_policy.start
-            if isinstance(access_policy.start, date):
-                start = _to_utc_datetime(start)
-            ETree.SubElement(policy, 'Start').text = start
-
-        if access_policy.expiry:
-            expiry = access_policy.expiry
-            if isinstance(access_policy.expiry, date):
-                expiry = _to_utc_datetime(expiry)
-            ETree.SubElement(policy, 'Expiry').text = expiry
-        
-        if access_policy.permission:
-            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
-
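# Editor's note: a self-contained sketch of the SignedIdentifiers
# document built above; AccessPolicy is stood in for by a plain tuple
# (an assumption made for brevity).
from io import BytesIO
from xml.etree import ElementTree as ETree

policies = {'policy-1': ('2016-01-01T00:00:00Z', '2016-01-02T00:00:00Z', 'rw')}
sis = ETree.Element('SignedIdentifiers')
for sid, (start, expiry, permission) in policies.items():
    si = ETree.SubElement(sis, 'SignedIdentifier')
    ETree.SubElement(si, 'Id').text = sid
    policy = ETree.SubElement(si, 'AccessPolicy')
    ETree.SubElement(policy, 'Start').text = start
    ETree.SubElement(policy, 'Expiry').text = expiry
    ETree.SubElement(policy, 'Permission').text = permission
stream = BytesIO()
ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8')
print(stream.getvalue())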
-def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.Element('StorageServiceProperties')
-
-    # Logging
-    if logging:
-        logging_element = ETree.SubElement(service_properties_element, 'Logging')
-        ETree.SubElement(logging_element, 'Version').text = logging.version
-        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
-        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
-        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
-
-        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
-        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)
-
-    # HourMetrics
-    if hour_metrics:
-        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
-        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)
-
-    # MinuteMetrics
-    if minute_metrics:
-        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
-        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)
-
-    # CORS
-    # Make sure to still serialize empty list
-    if cors is not None:
-        cors_element = ETree.SubElement(service_properties_element, 'Cors')
-        for rule in cors:
-            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
-            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
-            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
-            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
-            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
-            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
-
-    # Target version
-    if target_version:
-        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
-
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
-
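# Editor's note: a reduced sketch of the CORS branch above. Passing an
# empty list (rather than None) still emits a <Cors/> element, which is
# how existing rules get cleared on the service.
from xml.etree import ElementTree as ETree

root = ETree.Element('StorageServiceProperties')
cors = []                      # empty list: serialize, but with no rules
if cors is not None:
    ETree.SubElement(root, 'Cors')
print(ETree.tostring(root))    # b'<StorageServiceProperties><Cors /></StorageServiceProperties>'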
-def _convert_metrics_to_xml(metrics, root):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    ETree.SubElement(root, 'Version').text = metrics.version
-
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
-
-    # IncludeAPIs
-    if metrics.enabled and metrics.include_apis is not None:
-        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
-
-    # RetentionPolicy
-    retention_element = ETree.SubElement(root, 'RetentionPolicy')
-    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
-
-def _convert_retention_policy_to_xml(retention_policy, root):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
-
-    # Days
-    if retention_policy.enabled and retention_policy.days:
-        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,41 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .models import (
-    Container,
-    ContainerProperties,
-    Blob,
-    BlobProperties,
-    BlobBlock,
-    BlobBlockList,
-    PageRange,
-    ContentSettings,
-    CopyProperties,
-    ContainerPermissions,
-    BlobPermissions,
-    _LeaseActions,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    Include,
-    SequenceNumberAction,
-    BlockListType,
-    PublicAccess,
-    BlobPrefix,
-    DeleteSnapshot,
-)
-
-from .blockblobservice import BlockBlobService
-from .pageblobservice import PageBlobService
-from .appendblobservice import AppendBlobService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_chunking.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,343 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import threading
-
-from time import sleep
-from .._common_conversion import _encode_base64
-from .._serialization import url_quote
-from azure.common import (
-    AzureHttpError,
-)
-from .._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-from .models import BlobBlock
-
-
-class _BlobChunkDownloader(object):
-    def __init__(self, blob_service, container_name, blob_name, blob_size,
-                 chunk_size, start_range, end_range, stream, max_retries,
-                 retry_wait, progress_callback, if_modified_since, 
-                 if_unmodified_since, if_match, if_none_match, timeout):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.chunk_size = chunk_size
-        if start_range is not None:
-            end_range = end_range or blob_size
-            self.blob_size = end_range - start_range
-            self.blob_end = end_range
-            self.start_index = start_range
-        else:
-            self.blob_size = blob_size
-            self.blob_end = blob_size
-            self.start_index = 0
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock()
-        self.max_retries = max_retries
-        self.retry_wait = retry_wait
-        self.timeout = timeout
-
-        self.if_modified_since=if_modified_since
-        self.if_unmodified_since=if_unmodified_since
-        self.if_match=if_match
-        self.if_none_match=if_none_match
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.blob_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.blob_end:
-            chunk_end = self.blob_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk_with_retries(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.blob_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + chunk_start)
-            self.stream.write(chunk_data)
-
-    def _download_chunk_with_retries(self, chunk_start, chunk_end):
-        retries = self.max_retries
-        while True:
-            try:
-                response = self.blob_service._get_blob(
-                    self.container_name,
-                    self.blob_name,
-                    start_range=chunk_start,
-                    end_range=chunk_end - 1,
-                    if_modified_since=self.if_modified_since,
-                    if_unmodified_since=self.if_unmodified_since,
-                    if_match=self.if_match,
-                    if_none_match=self.if_none_match,
-                    timeout=self.timeout
-                )
-
-                # This makes sure that if_match is set so that we can validate 
-                # that subsequent downloads are to an unmodified blob
-                self.if_match = response.properties.etag
-                return response
-            except AzureHttpError:
-                if retries > 0:
-                    retries -= 1
-                    sleep(self.retry_wait)
-                else:
-                    raise
-
-
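# Editor's note: the chunking arithmetic used by the downloader above,
# extracted into a standalone sketch. Offsets step by chunk_size and the
# final chunk is clamped to the end of the requested range.
def chunks(start_index, blob_end, chunk_size):
    index = start_index
    while index < blob_end:
        yield index, min(index + chunk_size, blob_end)
        index += chunk_size

assert list(chunks(0, 10, 4)) == [(0, 4), (4, 8), (8, 10)]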
-class _BlobChunkUploader(object):
-    def __init__(self, blob_service, container_name, blob_name, blob_size,
-                 chunk_size, stream, parallel, max_retries, retry_wait,
-                 progress_callback, lease_id, timeout):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.blob_size = blob_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock() if parallel else None
-        self.max_retries = max_retries
-        self.retry_wait = retry_wait
-        self.lease_id = lease_id
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = 0
-        if self.blob_size is None:
-            # we don't know the size of the stream, so we have no
-            # choice but to seek
-            while True:
-                data = self._read_from_stream(index, 1)
-                if not data:
-                    break
-                yield index
-                index += self.chunk_size
-        else:
-            while index < self.blob_size:
-                yield index
-                index += self.chunk_size
-
-    def process_chunk(self, chunk_offset):
-        size = self.chunk_size
-        if self.blob_size is not None:
-            size = min(size, self.blob_size - chunk_offset)
-        chunk_data = self._read_from_stream(chunk_offset, size)
-        return self._upload_chunk_with_retries(chunk_offset, chunk_data)
-
-    def process_all_unknown_size(self):
-        assert self.stream_lock is None
-        range_ids = []
-        index = 0
-        while True:
-            data = self._read_from_stream(None, self.chunk_size)
-            if data:
-                range_id = self._upload_chunk_with_retries(index, data)
-                index += len(data)
-                range_ids.append(range_id)
-            else:
-                break
-
-        return range_ids
-
-    def _read_from_stream(self, offset, count):
-        if self.stream_lock is not None:
-            with self.stream_lock:
-                self.stream.seek(self.stream_start + offset)
-                data = self.stream.read(count)
-        else:
-            data = self.stream.read(count)
-        return data
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.blob_size)
-
-    def _upload_chunk_with_retries(self, chunk_offset, chunk_data):
-        retries = self.max_retries
-        while True:
-            try:
-                range_id = self._upload_chunk(chunk_offset, chunk_data) 
-                self._update_progress(len(chunk_data))
-                return range_id
-            except AzureHttpError:
-                if retries > 0:
-                    retries -= 1
-                    sleep(self.retry_wait)
-                else:
-                    raise
-
-
-class _BlockBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        block_id=url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
-        self.blob_service.put_block(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            block_id,
-            lease_id=self.lease_id,
-            timeout=self.timeout,
-        )
-        return BlobBlock(block_id)
-
-
-class _PageBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        resp = self.blob_service.update_page(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            lease_id=self.lease_id,
-            if_match=self.if_match,
-            timeout=self.timeout,
-        )
-
-        if not self.parallel:
-            self.if_match = resp.etag
-
-class _AppendBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if not hasattr(self, 'current_length'):
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                timeout=self.timeout,
-            )
-
-            self.current_length = resp.append_offset
-        else:
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                appendpos_condition=self.current_length + chunk_offset,
-                timeout=self.timeout,
-            )
-
-
-def _download_blob_chunks(blob_service, container_name, blob_name,
-                          blob_size, block_size, start_range, end_range, stream,
-                          max_connections, max_retries, retry_wait, progress_callback,
-                          if_modified_since, if_unmodified_since, if_match, if_none_match, 
-                          timeout):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('blob'))
-
-    downloader = _BlobChunkDownloader(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        start_range,
-        end_range,
-        stream,
-        max_retries,
-        retry_wait,
-        progress_callback,
-        if_modified_since,
-        if_unmodified_since,
-        if_match,
-        if_none_match,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    import concurrent.futures
-    executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-    result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-
-def _upload_blob_chunks(blob_service, container_name, blob_name,
-                        blob_size, block_size, stream, max_connections,
-                        max_retries, retry_wait, progress_callback,
-                        lease_id, uploader_class, maxsize_condition=None, 
-                        if_match=None, timeout=None):
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        max_retries,
-        retry_wait,
-        progress_callback,
-        lease_id,
-        timeout
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism, as a ranged upload may
-    # start before the previous one finishes and returns the new etag
-    uploader.if_match = if_match if not max_connections > 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
-    else:
-        if blob_size is not None:
-            range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
-        else:
-            range_ids = uploader.process_all_unknown_size()
-
-    return range_ids
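# Editor's note: a sketch of why _upload_blob_chunks can hand
# executor.map the chunk offsets directly: map() yields results in input
# order even when workers finish out of order, so the returned range_ids
# stay in block order for a later put_block_list.
import concurrent.futures
import random
import time

def upload(offset):                      # stand-in for process_chunk
    time.sleep(random.random() / 100)    # finish in arbitrary order
    return 'block-{0:032d}'.format(offset)

with concurrent.futures.ThreadPoolExecutor(4) as executor:
    ids = list(executor.map(upload, range(0, 16, 4)))
assert ids == ['block-{0:032d}'.format(i) for i in range(0, 16, 4)]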
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,389 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from dateutil import parser
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .._common_conversion import (
-    _decode_base64_to_text,
-    _to_str,
-)
-from .._deserialization import (
-    _parse_properties,
-    _int_to_str,
-    _parse_metadata,
-    _parse_response_for_dict,
-    _convert_xml_to_signed_identifiers,
-)
-from .models import (
-    Container,
-    Blob,
-    BlobBlock,
-    BlobBlockList,
-    BlobBlockState,
-    BlobProperties,
-    PageRange,
-    ContainerProperties,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    BlobPrefix,
-)
-from ..models import _list
-
-def _parse_base_properties(response):
-    '''
-    Extracts basic response headers.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-
-    resource_properties = ResourceProperties()
-    resource_properties.last_modified = parser.parse(raw_headers.get('last-modified'))
-    resource_properties.etag = raw_headers.get('etag')
-
-    return resource_properties
-
-def _parse_page_properties(response):
-    '''
-    Extracts page response headers.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-
-    put_page = PageBlobProperties()
-    put_page.last_modified = parser.parse(raw_headers.get('last-modified'))
-    put_page.etag = raw_headers.get('etag')
-    put_page.sequence_number = _int_to_str(raw_headers.get('x-ms-blob-sequence-number'))
-
-    return put_page
-
-def _parse_append_block(response):
-    '''
-    Extracts append block response headers.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-
-    append_block = AppendBlockProperties()
-    append_block.last_modified = parser.parse(raw_headers.get('last-modified'))
-    append_block.etag = raw_headers.get('etag')
-    append_block.append_offset = _int_to_str(raw_headers.get('x-ms-blob-append-offset'))
-    append_block.committed_block_count = _int_to_str(raw_headers.get('x-ms-blob-committed-block-count'))
-
-    return append_block
-
-def _parse_snapshot_blob(name, response):
-    '''
-    Extracts snapshot return header.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-    snapshot = raw_headers.get('x-ms-snapshot')
-
-    return _parse_blob(name, snapshot, response)
-
-def _parse_lease_time(response):
-    '''
-    Extracts lease time return header.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-    lease_time = raw_headers.get('x-ms-lease-time')
-    if lease_time:
-        lease_time = _int_to_str(lease_time)
-
-    return lease_time
-
-def _parse_lease_id(response):
-    '''
-    Extracts lease ID return header.
-    '''   
-    raw_headers = _parse_response_for_dict(response)
-    lease_id = raw_headers.get('x-ms-lease-id')
-
-    return lease_id
-
-def _parse_blob(name, snapshot, response):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, BlobProperties)
-    return Blob(name, snapshot, response.body, props, metadata)
-
-def _parse_container(name, response):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ContainerProperties)
-    return Container(name, props, metadata)
-
-def _convert_xml_to_signed_identifiers_and_access(response):
-    acl = _convert_xml_to_signed_identifiers(response.body)
-
-    raw_headers = _parse_response_for_dict(response)
-    acl.public_access = raw_headers.get('x-ms-blob-public-access')
-
-    return acl
-
-def _convert_xml_to_containers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Containers>
-        <Container>
-          <Name>container-name</Name>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <LeaseStatus>locked | unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>      
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Container>
-      </Containers>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    containers = _list()
-    list_element = ETree.fromstring(response.body)
-    
-    # Set next marker
-    setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
-
-    containers_element = list_element.find('Containers')
-
-    for container_element in containers_element.findall('Container'):
-        # Name element
-        container = Container()
-        container.name = container_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = container_element.find('Metadata')
-        if metadata_root_element is not None:
-            container.metadata = dict()
-            for metadata_element in metadata_root_element:
-                container.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = container_element.find('Properties')
-        container.properties.etag = properties_element.findtext('Etag')
-        container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        container.properties.lease_status = properties_element.findtext('LeaseStatus')
-        container.properties.lease_state = properties_element.findtext('LeaseState')
-        container.properties.lease_duration = properties_element.findtext('LeaseDuration')
-        
-        # Add container to list
-        containers.append(container)
-
-    return containers
-
-LIST_BLOBS_ATTRIBUTE_MAP = {
-    'Last-Modified': (None, 'last_modified', parser.parse),
-    'Etag': (None, 'etag', _to_str),
-    'x-ms-blob-sequence-number': (None, 'sequence_number', _int_to_str),
-    'BlobType': (None, 'blob_type', _to_str),
-    'Content-Length': (None, 'content_length', _int_to_str),
-    'Content-Type': ('content_settings', 'content_type', _to_str),
-    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
-    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
-    'Content-Language': ('content_settings', 'content_language', _to_str),
-    'Content-MD5': ('content_settings', 'content_md5', _to_str),
-    'Cache-Control': ('content_settings', 'cache_control', _to_str),
-    'LeaseStatus': ('lease', 'status', _to_str),
-    'LeaseState': ('lease', 'state', _to_str),
-    'LeaseDuration': ('lease', 'duration', _to_str),
-    'CopyId': ('copy', 'id', _to_str),
-    'CopySource': ('copy', 'source', _to_str),
-    'CopyStatus': ('copy', 'status', _to_str),
-    'CopyProgress': ('copy', 'progress', _to_str),
-    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
-    'CopyStatusDescription': ('copy', 'status_description', _to_str),
-}
-
-def _convert_xml_to_blob_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Delimiter>string-value</Delimiter>
-      <Blobs>
-        <Blob>
-          <Name>blob-name</Name>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date-time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Content-Length>size-in-bytes</Content-Length>
-            <Content-Type>blob-content-type</Content-Type>
-            <Content-Encoding />
-            <Content-Language />
-            <Content-MD5 />
-            <Cache-Control />
-            <x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
-            <BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
-            <LeaseStatus>locked|unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <CopyId>id</CopyId>
-            <CopyStatus>pending | success | aborted | failed </CopyStatus>
-            <CopySource>source url</CopySource>
-            <CopyProgress>bytes copied/bytes total</CopyProgress>
-            <CopyCompletionTime>datetime</CopyCompletionTime>
-            <CopyStatusDescription>error string</CopyStatusDescription>
-          </Properties>
-          <Metadata>   
-            <Name>value</Name>
-          </Metadata>
-        </Blob>
-        <BlobPrefix>
-          <Name>blob-prefix</Name>
-        </BlobPrefix>
-      </Blobs>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    blob_list = _list()    
-    list_element = ETree.fromstring(response.body)
-
-    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
-
-    blobs_element = list_element.find('Blobs')
-    blob_prefix_elements = blobs_element.findall('BlobPrefix')
-    if blob_prefix_elements is not None:
-        for blob_prefix_element in blob_prefix_elements:
-            prefix = BlobPrefix()
-            prefix.name = blob_prefix_element.findtext('Name')
-            blob_list.append(prefix)
-
-    for blob_element in blobs_element.findall('Blob'):
-        blob = Blob()
-        blob.name = blob_element.findtext('Name')
-        blob.snapshot = blob_element.findtext('Snapshot')
-
-        # Properties
-        properties_element = blob_element.find('Properties')
-        if properties_element is not None:
-            for property_element in properties_element:
-                info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
-                if info is None:
-                    setattr(blob.properties, property_element.tag, _to_str(property_element.text))                   
-                elif info[0] is None:
-                    setattr(blob.properties, info[1], info[2](property_element.text))
-                else:
-                    attr = getattr(blob.properties, info[0])
-                    setattr(attr, info[1], info[2](property_element.text))
-
-
-        # Metadata
-        metadata_root_element = blob_element.find('Metadata')
-        if metadata_root_element is not None:
-            blob.metadata = dict()
-            for metadata_element in metadata_root_element:
-                blob.metadata[metadata_element.tag] = metadata_element.text
-        
-        # Add blob to list
-        blob_list.append(blob)
-
-    return blob_list
-
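# Editor's note: a reduced sketch of the LIST_BLOBS_ATTRIBUTE_MAP
# dispatch above, run against a minimal hand-written response body.
from xml.etree import ElementTree as ETree

body = b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults><Blobs><Blob>
  <Name>hello.txt</Name>
  <Properties><Content-Length>11</Content-Length></Properties>
</Blob></Blobs><NextMarker /></EnumerationResults>'''

attribute_map = {'Content-Length': ('content_length', int)}
root = ETree.fromstring(body)
for blob in root.find('Blobs').findall('Blob'):
    props = {}
    for prop in blob.find('Properties'):
        name, convert = attribute_map.get(prop.tag, (prop.tag, str))
        props[name] = convert(prop.text)
    print(blob.findtext('Name'), props)   # hello.txt {'content_length': 11}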
-def _convert_xml_to_block_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <CommittedBlocks>
-         <Block>
-            <Name>base64-encoded-block-id</Name>
-            <Size>size-in-bytes</Size>
-         </Block>
-      </CommittedBlocks>
-      <UncommittedBlocks>
-        <Block>
-          <Name>base64-encoded-block-id</Name>
-          <Size>size-in-bytes</Size>
-        </Block>
-      </UncommittedBlocks>
-     </BlockList>
-
-    Converts xml response to block list class.
-    '''
-    if response is None or response.body is None:
-        return response
-
-    block_list = BlobBlockList()
-
-    list_element = ETree.fromstring(response.body)
-
-    committed_blocks_element = list_element.find('CommittedBlocks')
-    for block_element in committed_blocks_element.findall('Block'):
-        block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-        block_size = int(block_element.findtext('Size'))
-        block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
-        block._set_size(block_size)
-        block_list.committed_blocks.append(block)
-
-    uncommitted_blocks_element = list_element.find('UncommittedBlocks')
-    for block_element in uncommitted_blocks_element.findall('Block'):
-        block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-        block_size = int(block_element.findtext('Size'))
-        block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
-        block._set_size(block_size)
-        block_list.uncommitted_blocks.append(block)
-
-    return block_list
-
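# Editor's note: a sketch of the block-name decoding done above; the
# service returns base64-encoded block ids, which the parser turns back
# into text before building BlobBlock objects.
import base64
from xml.etree import ElementTree as ETree

body = b'''<BlockList><CommittedBlocks><Block>
  <Name>YmxvY2stMQ==</Name><Size>1024</Size>
</Block></CommittedBlocks><UncommittedBlocks /></BlockList>'''

root = ETree.fromstring(body)
for block in root.find('CommittedBlocks').findall('Block'):
    name = base64.b64decode(block.findtext('Name', '')).decode('utf-8')
    print(name, int(block.findtext('Size')))   # block-1 1024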
-def _convert_xml_to_page_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <PageList>
-       <PageRange>
-          <Start>Start Byte</Start>
-          <End>End Byte</End>
-       </PageRange>
-       <PageRange>
-          <Start>Start Byte</Start>
-          <End>End Byte</End>
-       </PageRange>
-    </PageList>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    page_list = list()
-
-    list_element = ETree.fromstring(response.body)
-
-    page_range_elements = list_element.findall('PageRange')
-    for page_range_element in page_range_elements:
-        page_list.append(
-            PageRange(
-                int(page_range_element.findtext('Start')),
-                int(page_range_element.findtext('End'))
-            )
-        )
-
-    return page_list
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_error.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,38 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
-    'Invalid page blob size: {0}. ' + \
-    'The size must be aligned to a 512-byte boundary.'
-
-_ERROR_PAGE_BLOB_START_ALIGNMENT = \
-    'start_range must align with 512 page size'
-
-_ERROR_PAGE_BLOB_END_ALIGNMENT = \
-    'end_range must align with 512 page size'
-
-_ERROR_INVALID_BLOCK_ID = \
-    'All blocks in block list need to have valid block ids.'
-
-_ERROR_INVALID_LEASE_DURATION = \
-    "lease_duration param needs to be between 15 and 60 or -1."
-
-_ERROR_INVALID_LEASE_BREAK_PERIOD = \
-    "lease_break_period param needs to be between 0 and 60."
-
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'The blob chunk downloader requires more than one thread; ' + \
-    'use get_blob_to_bytes for single-threaded blob downloads.'
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_serialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,123 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from xml.sax.saxutils import escape as xml_escape
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .._common_conversion import (
-    _encode_base64,
-    _str,
-)
-from .._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-from ._error import (
-    _ERROR_PAGE_BLOB_START_ALIGNMENT,
-    _ERROR_PAGE_BLOB_END_ALIGNMENT,
-    _ERROR_INVALID_BLOCK_ID,
-)
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-def _get_path(container_name=None, blob_name=None):
-    '''
-    Creates the path to access a blob resource.
-
-    container_name:
-        Name of container.
-    blob_name:
-        The path to the blob.
-    '''
-    if container_name and blob_name:
-        return '/{0}/{1}'.format(
-            _str(container_name),
-            _str(blob_name))
-    elif container_name:
-        return '/{0}'.format(_str(container_name))
-    else:
-        return '/'
-
-def _validate_and_format_range_headers(request, start_range, end_range,
-                                       start_range_required=True,
-                                       end_range_required=True,
-                                       check_content_md5=False,
-                                       align_to_page=False):
-    request.headers = request.headers or []
-    if start_range_required:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-    _validate_page_ranges(start_range, end_range, align_to_page)
-    if end_range is not None:
-        request.headers.append(('x-ms-range', "bytes={0}-{1}".format(start_range, end_range)))
-    else:
-        request.headers.append(('x-ms-range', "bytes={0}-".format(start_range)))
-
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers.append(('x-ms-range-get-content-md5', 'true'))
-
-def _validate_page_ranges(start_range, end_range, align_to_page):
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)
-
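# Editor's note: a worked check of the 512-byte page alignment rule
# enforced above: start offsets sit on a page boundary and end offsets
# on the last byte of a page.
PAGE = 512
start_range, end_range = 0, 1023          # a two-page write
assert start_range % PAGE == 0            # 0, 512, 1024, ...
assert end_range % PAGE == PAGE - 1       # 511, 1023, 1535, ...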
-def _convert_block_list_to_xml(block_id_list):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <Committed>first-base64-encoded-block-id</Committed>
-      <Uncommitted>second-base64-encoded-block-id</Uncommitted>
-      <Latest>third-base64-encoded-block-id</Latest>
-    </BlockList>
-
-    Convert a block list to xml to send.
-
-    block_id_list:
-        A list of BlobBlock containing the block ids and block states
-        that are used in put_block_list. Each block is serialized under
-        the element named for its state.
-    '''
-    if block_id_list is None:
-        return ''
-
-    block_list_element = ETree.Element('BlockList')
-
-    # One child element per block, named for the block's state
-    for block in block_id_list:
-        if block.id is None:
-            raise ValueError(_ERROR_INVALID_BLOCK_ID)
-        block_id = xml_escape(_str(_encode_base64(block.id)))
-        ETree.SubElement(block_list_element, block.state).text = block_id
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
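# Editor's note: a self-contained sketch of the BlockList document the
# function above sends for put_block_list; block ids are base64-encoded
# and each one is placed under the element named for its state.
import base64
from xml.etree import ElementTree as ETree

blocks = [('Latest', '{0:032d}'.format(0)), ('Latest', '{0:032d}'.format(1))]
root = ETree.Element('BlockList')
for state, block_id in blocks:
    encoded = base64.b64encode(block_id.encode('utf-8')).decode('utf-8')
    ETree.SubElement(root, state).text = encoded
print(ETree.tostring(root))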
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/appendblobservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/appendblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/appendblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/appendblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,512 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-)
-from .._common_conversion import (
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-)
-from .._serialization import (
-    _get_request_body_bytes_only,
-)
-from .._http import HTTPRequest
-from ._chunking import (
-    _AppendBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .models import _BlobTypes
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ._serialization import (
-    _get_path,
-)
-from ._deserialization import (
-    _parse_append_block,
-    _parse_base_properties,
-)
-from .baseblobservice import BaseBlobService
-from os import path
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class AppendBlobService(BaseBlobService):
-    '''
-    An append blob is composed of blocks and is optimized for append operations.
-    When you modify an append blob, blocks are added to the end of the blob only,
-    via the append_block operation. Updating or deleting existing blocks is not
-    supported. Unlike a block blob, an append blob does not expose its block IDs.
-
-    Each block in an append blob can be a different size, up to a maximum of 4 MB,
-    and an append blob can include up to 50,000 blocks. The maximum size of an
-    append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks).
-    '''
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key or sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither are 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        self.blob_type = _BlobTypes.AppendBlob
-        super(AppendBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, 
-            custom_domain, request_session, connection_string)
-
-    def create_blob(self, container_name, blob_name, content_settings=None,
-                    metadata=None, lease_id=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a blob or overrides an existing blob. Use if_none_match='*' to
-        prevent overriding an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to
-            perform the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-blob-type', _to_str(self.blob_type)),
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
-    def append_block(self, container_name, blob_name, block,
-                     content_md5=None, maxsize_condition=None,
-                     appendpos_condition=None,
-                     lease_id=None, if_modified_since=None,
-                     if_unmodified_since=None, if_match=None,
-                     if_none_match=None, timeout=None):
-        '''
-        Commits a new block of data to the end of an existing append blob.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes block:
-            Content of the block in bytes.
-        :param str content_md5:
-            An MD5 hash of the block content. This hash is used to
-            verify the integrity of the blob during transport. When this
-            header is specified, the storage service checks the hash that has
-            arrived with the one that was sent.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed).
-        :param int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 – Precondition Failed).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            ETag, last modified, append offset, and committed block count 
-            properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'appendblock'),
-            ('timeout', _int_to_str(timeout)),
-         ]
-        request.headers = [
-            ('Content-MD5', _to_str(content_md5)),
-            ('x-ms-blob-condition-maxsize', _to_str(maxsize_condition)),
-            ('x-ms-blob-condition-appendpos', _to_str(appendpos_condition)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        request.body = _get_request_body_bytes_only('block', block)
-
-        response = self._perform_request(request)
-        return _parse_append_block(response)
-
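For orientation, here is a minimal sketch of driving this raw Append Block call with both conditional headers. The AppendBlobService class name, the import path, and the credentials are assumptions inferred from this package's layout and the upstream azure-storage SDK it vendors; the diff itself only shows the method body above.

    # Hypothetical usage sketch; class name and import path are assumptions.
    from azure.multiapi.storage.v2015_04_05.blob import AppendBlobService

    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    # Append 4 KiB, but only if the current append position is still 0
    # (appendpos_condition) and the blob would not grow past 1 MiB
    # (maxsize_condition); otherwise the service answers 412.
    props = service.append_block(
        'mycontainer', 'myblob', b'x' * 4096,
        appendpos_condition=0,
        maxsize_condition=1024 * 1024)
    print(props.append_offset, props.committed_block_count)
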
-    #----Convenience APIs----------------------------------------------
-
-    def append_blob_from_path(
-        self, container_name, blob_name, file_path,
-        maxsize_condition=None, progress_callback=None,
-        max_retries=5, retry_wait=1.0, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file path, with automatic
-        chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            self.append_blob_from_stream(
-                container_name,
-                blob_name,
-                stream,
-                count=count,
-                maxsize_condition=maxsize_condition,
-                progress_callback=progress_callback,
-                max_retries=max_retries,
-                retry_wait=retry_wait,
-                lease_id=lease_id,
-                timeout=timeout)
-
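A sketch of the file-path wrapper above, continuing with the assumed AppendBlobService import; create_blob is likewise an assumption mirroring the upstream SDK, since an append blob must exist before it can be appended to.

    import sys
    from azure.multiapi.storage.v2015_04_05.blob import AppendBlobService

    def report(current, total):
        # total is None when the overall size is unknown
        sys.stdout.write('\rappended {} of {} bytes'.format(current, total))

    service = AppendBlobService(account_name='myaccount', account_key='mykey')
    service.create_blob('logs', 'app.log')  # assumed helper; blob must pre-exist
    service.append_blob_from_path(
        'logs', 'app.log', '/var/log/app.log',
        maxsize_condition=100 * 1024 * 1024,  # cap the blob at 100 MiB
        progress_callback=report)
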
-    def append_blob_from_bytes(
-        self, container_name, blob_name, blob, index=0, count=None,
-        maxsize_condition=None, progress_callback=None,
-        max_retries=5, retry_wait=1.0, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from an array of bytes, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        self.append_blob_from_stream(
-            container_name,
-            blob_name,
-            stream,
-            count=count,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            timeout=timeout)
-
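The bytes variant in a sketch, reusing the service object from the previous example, to show how index and count select a slice of the buffer:

    data = b'0123456789'
    # Uploads bytes 4..7 ('4567'); count=None uploads from index to the end.
    service.append_blob_from_bytes('logs', 'app.log', data, index=4, count=4)
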
-    def append_blob_from_text(
-        self, container_name, blob_name, text, encoding='utf-8',
-        maxsize_condition=None, progress_callback=None,
-        max_retries=5, retry_wait=1.0, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from str/unicode, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.append_blob_from_bytes(
-            container_name,
-            blob_name,
-            text,
-            index=0,
-            count=len(text),
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            timeout=timeout)
-
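The text variant encodes client-side and then delegates to append_blob_from_bytes; bytes input skips the encoding step. A one-line sketch with the same assumed service object:

    # Text is encoded with the given codec before upload; bytes pass through.
    service.append_blob_from_text('logs', 'app.log', u'héllo\n', encoding='utf-8')
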
-    def append_blob_from_stream(
-        self, container_name, blob_name, stream, count=None,
-        maxsize_condition=None, progress_callback=None, 
-        max_retries=5, retry_wait=1.0, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file/stream, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_BLOCK_SIZE,
-            stream=stream,
-            max_connections=1, # upload not easily parallelizable
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            progress_callback=progress_callback,
-            lease_id=lease_id,
-            uploader_class=_AppendBlobChunkUploader,
-            maxsize_condition=maxsize_condition,
-            timeout=timeout
-        )
\ No newline at end of file
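Finally the stream variant, again with the assumed service object. The uploader pins max_connections=1 because appends are order-dependent, so count mainly lets it size chunks and report a progress total up front:

    import io

    stream = io.BytesIO(b'streamed payload')
    service.append_blob_from_stream('logs', 'app.log', stream, count=16)
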
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/baseblobservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/baseblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/baseblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/baseblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2830 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from azure.common import AzureHttpError
-from .._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_PARALLEL_NOT_SEEKABLE
-)
-from ._error import (
-    _ERROR_INVALID_LEASE_DURATION,
-    _ERROR_INVALID_LEASE_BREAK_PERIOD,
-)
-from .._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-)
-from abc import ABCMeta
-from .._serialization import (
-    _get_request_body,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from .._http import HTTPRequest
-from ._chunking import _download_blob_chunks
-from ..models import (
-    Services,
-    ListGenerator,
-)
-from .models import (
-    Blob,
-    BlobProperties,
-    _LeaseActions,
-    ContainerPermissions,
-    BlobPermissions,
-    Container,
-    ContainerProperties,
-)
-from .._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-    _StorageNoAuthentication,
-)
-from .._connection import _ServiceParameters
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from .._deserialization import (
-    _convert_xml_to_service_properties,
-    _get_download_size,
-    _parse_metadata,
-    _parse_properties,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_containers,
-    _parse_blob,
-    _convert_xml_to_blob_list,
-    _parse_container,
-    _parse_snapshot_blob,
-    _parse_lease_time,
-    _parse_lease_id,
-    _convert_xml_to_signed_identifiers_and_access,
-    _parse_base_properties,
-)
-from ..sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from ..storageclient import StorageClient
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-class BaseBlobService(StorageClient):
-
-    '''
-    This is the main class managing Blob resources.
-
-    The Blob service stores text and binary data as blobs in the cloud.
-    The Blob service offers the following three resources: the storage account,
-    containers, and blobs. Within your storage account, containers provide a
-    way to organize sets of blobs. For more information please see:
-    https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx
-    '''
-
-    __metaclass__ = ABCMeta
-    MAX_SINGLE_GET_SIZE = 64 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'blob',
-            account_name=account_name, 
-            account_key=account_key,
-            sas_token=sas_token, 
-            is_emulated=is_emulated,
-            protocol=protocol, 
-            endpoint_suffix=endpoint_suffix,
-            custom_domain=custom_domain,
-            request_session=request_session,
-            connection_string=connection_string)
-
-        super(BaseBlobService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            self.authentication = _StorageNoAuthentication()
-
-    def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None):
-        '''
-        Creates the url to access a blob.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: blob access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}/{}'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-            blob_name,
-        )
-
-        if sas_token:
-            url += '?' + sas_token
-
-        return url
-
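A sketch of make_blob_url, assuming a concrete subclass such as BlockBlobService is exported next to this abstract base, as it is in the upstream SDK:

    from azure.multiapi.storage.v2015_04_05.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='mykey')
    print(service.make_blob_url('mycontainer', 'myblob'))
    # https://myaccount.blob.core.windows.net/mycontainer/myblob
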
-    def generate_account_shared_access_signature(self, resource_types, permission, 
-                                        expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the blob service.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The
-            default value is https,http. See :class:`~azure.storage.models.Protocol`
-            for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.BLOB, resource_types, permission, 
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
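A sketch of minting an account-level SAS with the helper above, reusing the service object from the previous example. ResourceTypes and AccountPermissions are assumed to live in this package's shared models module, mirroring upstream azure.storage.models:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2015_04_05.models import (
        AccountPermissions,
        ResourceTypes,
    )

    # Read/list access to containers and blobs, valid for one hour.
    token = service.generate_account_shared_access_signature(
        ResourceTypes(container=True, object=True),
        AccountPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1))
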
-    def generate_container_shared_access_signature(self, container_name, 
-                        permission=None, expiry=None, 
-                        start=None, id=None, ip=None, protocol=None,
-                        cache_control=None, content_disposition=None,
-                        content_encoding=None, content_language=None,
-                        content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The
-            default value is https,http. See :class:`~azure.storage.models.Protocol`
-            for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_container(
-            container_name,
-            permission, 
-            expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def generate_blob_shared_access_signature(
-        self, container_name, blob_name, permission=None, 
-        expiry=None, start=None, id=None, ip=None, protocol=None,
-        cache_control=None, content_disposition=None,
-        content_encoding=None, content_language=None,
-        content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The
-            default value is https,http. See :class:`~azure.storage.models.Protocol`
-            for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_blob(
-            container_name,
-            blob_name,
-            permission, 
-            expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
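A blob-level SAS pairs naturally with make_blob_url. A sketch, with BlobPermissions assumed to be re-exported from the blob package as upstream does:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2015_04_05.blob import BlobPermissions

    token = service.generate_blob_shared_access_signature(
        'mycontainer', 'myblob',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    url = service.make_blob_url('mycontainer', 'myblob', sas_token=token)
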
-    def list_containers(self, prefix=None, num_results=None, include_metadata=False, 
-                        marker=None, timeout=None):
-        '''
-        Returns a generator to list the containers under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        containers, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        include = 'metadata' if include_metadata else None
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, 
-                'include': include, 'timeout': timeout}
-        resp = self._list_containers(**kwargs)
-
-        return ListGenerator(resp, self._list_containers, (), kwargs)
-
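A sketch of the lazy enumeration described above, including resuming from next_marker once a capped generator is exhausted (same assumed service object):

    containers = service.list_containers(
        prefix='logs-', num_results=5, include_metadata=True)
    for container in containers:
        print(container.name, container.metadata)
    if containers.next_marker:
        # More than num_results containers matched; continue where we stopped.
        more = service.list_containers(
            prefix='logs-', marker=containers.next_marker)
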
-
-    def _list_containers(self, prefix=None, marker=None, max_results=None, 
-                         include=None, timeout=None):
-        '''
-        Returns a list of the containers under the specified account.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the container's
-            metadata be returned as part of the response body. Set this
-            parameter to the string 'metadata' to get the container's metadata.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path() 
-        request.query = [
-            ('comp', 'list'),
-            ('prefix', _to_str(prefix)),
-            ('marker', _to_str(marker)),
-            ('maxresults', _int_to_str(max_results)),
-            ('include', _to_str(include)),
-            ('timeout', _int_to_str(timeout))
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_containers(response)
-
-    def create_container(self, container_name, metadata=None,
-                         public_access=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails if
-        fail_on_exist is True.
-
-        :param str container_name:
-            Name of container to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: {'Category': 'test'}
-        :type metadata: a dict mapping str to str
-        :param public_access:
-            Possible values include: container, blob.
-        :type public_access:
-            One of the values listed in the :class:`~azure.storage.blob.models.PublicAccess` enum.
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the container exists.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is created, False if container already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-blob-public-access', _to_str(public_access))
-        ]
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
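Creating a container with metadata and anonymous blob-level reads, sketched with the string value the x-ms-blob-public-access header accepts (the PublicAccess enum referenced above provides the same values):

    if not service.create_container('logs',
                                    metadata={'category': 'test'},
                                    public_access='blob'):
        print('container already existed')  # fail_on_exist defaults to False
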
-    def get_container_properties(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: properties for the specified container within a container object.
-        :rtype: :class:`~azure.storage.blob.models.Container`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-lease-id', _to_str(lease_id))]
-
-        response = self._perform_request(request)
-        return _parse_container(container_name, response)
-
-    def get_container_metadata(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the container metadata name-value pairs.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-lease-id', _to_str(lease_id))]
-
-        response = self._perform_request(request)
-        return _parse_metadata(response)
-
-    def set_container_metadata(self, container_name, metadata=None,
-                               lease_id=None, if_modified_since=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param str container_name:
-            Name of existing container.
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as 
-            metadata. Example: {'category':'test'}
-        :type metadata: a dict mapping str to str
-        :param str lease_id:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
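Because each call replaces the entire metadata set, adding a single key means read-modify-write. A sketch with the same assumed service object:

    metadata = service.get_container_metadata('logs')
    metadata['owner'] = 'ops'
    service.set_container_metadata('logs', metadata)  # replaces all metadata
    service.set_container_metadata('logs')            # clears it entirely
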
-    def get_container_acl(self, container_name, lease_id=None, timeout=None):
-        '''
-        Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the container.
-        :rtype:
-            dict of str to :class:`.AccessPolicy` and a public_access property
-            if public access is turned on
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-lease-id', _to_str(lease_id))]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_signed_identifiers_and_access(response)
-
-    def set_container_acl(self, container_name, signed_identifiers=None,
-                          public_access=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Sets the permissions for the specified container or stored access 
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict of str to :class:`.AccessPolicy`
-        :param public_access:
-            Possible values include: container, blob.
-        :type public_access: 
-            One of the values listed in the :class:`~azure.storage.blob.models.PublicAccess` enum.
-        :param str lease_id:
-            If specified, set_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-blob-public-access', _to_str(public_access)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-        ]
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
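A sketch of attaching a stored access policy through this method. AccessPolicy is assumed to come from the shared models module and ContainerPermissions from the blob package, as in the upstream SDK:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2015_04_05.blob import ContainerPermissions
    from azure.multiapi.storage.v2015_04_05.models import AccessPolicy

    policy = AccessPolicy(
        permission=ContainerPermissions.READ,
        expiry=datetime.utcnow() + timedelta(days=7))
    # At most 5 policies per container; an empty dict clears them all.
    service.set_container_acl('logs', signed_identifiers={'read-only': policy})
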
-    def delete_container(self, container_name, fail_not_exist=False,
-                         lease_id=None, if_modified_since=None,
-                         if_unmodified_since=None, timeout=None):
-        '''
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :param str container_name:
-            Name of container to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the container doesn't
-            exist.
-        :param str lease_id:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is deleted, False if container doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),          
-        ]
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def _lease_container_impl(
-        self, container_name, lease_action, lease_id, lease_duration,
-        lease_break_period, proposed_lease_id, if_modified_since,
-        if_unmodified_since, timeout):
-        '''
-        Establishes and manages a lock on a container for delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-        The Lease Container operation can be called in one of five modes:
-            Acquire, to request a new lease
-            Renew, to renew an existing lease
-            Change, to change the ID of an existing lease
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the container
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_action:
-            Possible _LeaseActions values: acquire|renew|release|break|change
-        :param str lease_id:
-            Required if the container has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. For backwards compatibility, the default is
-            60, and the value is only used on an acquire operation.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for Acquire, required for Change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'lease'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-lease-action', _to_str(lease_action)),
-            ('x-ms-lease-duration', _to_str(lease_duration)),
-            ('x-ms-lease-break-period', _to_str(lease_break_period)),
-            ('x-ms-proposed-lease-id', _to_str(proposed_lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-        ]
-
-        return self._perform_request(request)
-
-    def acquire_container_lease(
-        self, container_name, lease_duration=-1, proposed_lease_id=None,
-        if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Acquires a lock on a container for delete operations.
-        The lock duration can be 15 to 60 seconds or infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-        if lease_duration != -1 and\
-           (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-
-        response = self._lease_container_impl(container_name, 
-                                          _LeaseActions.Acquire,
-                                          None, # lease_id
-                                          lease_duration,
-                                          None, # lease_break_period
-                                          proposed_lease_id,
-                                          if_modified_since,
-                                          if_unmodified_since,
-                                          timeout)
-        return _parse_lease_id(response)
-
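The acquire/renew/release cycle these lease helpers implement, sketched end to end with the assumed service object:

    lease_id = service.acquire_container_lease('logs', lease_duration=30)
    try:
        service.renew_container_lease('logs', lease_id)  # restarts the 30s clock
    finally:
        service.release_container_lease('logs', lease_id)
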
-    def renew_container_lease(
-        self, container_name, lease_id, if_modified_since=None,
-        if_unmodified_since=None, timeout=None):
-        '''
-        Renews a lock on a container for delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        response = self._lease_container_impl(container_name,
-                                          _LeaseActions.Renew,
-                                          lease_id,
-                                          None, # lease_duration
-                                          None, # lease_break_period
-                                          None, # proposed_lease_id
-                                          if_modified_since,
-                                          if_unmodified_since,
-                                          timeout)
-        return _parse_lease_id(response)
-
-    def release_container_lease(
-        self, container_name, lease_id, if_modified_since=None,
-        if_unmodified_since=None, timeout=None):
-        '''
-        Releases a lock on a container for delete operations, to free the
-        lease if it is no longer needed so that another client may
-        immediately acquire a lease against the container. The lock duration
-        can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name, 
-                                    _LeaseActions.Release,
-                                    lease_id,
-                                    None, # lease_duration
-                                    None, # lease_break_period
-                                    None, # proposed_lease_id
-                                    if_modified_since,
-                                    if_unmodified_since,
-                                    timeout)
-
-    def break_container_lease(
-        self, container_name, lease_break_period=None,
-        if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Breaks a lock on a container for delete operations.
-        Use to end the lease but ensure that another client cannot
-        acquire a new lease until the current lease period has expired.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_break_period:
-            This is the proposed duration of seconds that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
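-
-        Example (illustrative; ``service`` and an active lease on the
-        container are assumed)::
-
-            # ask the service to end the current lease within 10 seconds
-            remaining = service.break_container_lease('mycontainer',
-                                                      lease_break_period=10)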
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-        
-        response = self._lease_container_impl(container_name, 
-                                          _LeaseActions.Break,
-                                          None, # lease_id
-                                          None, # lease_duration
-                                          lease_break_period,
-                                          None, # proposed_lease_id
-                                          if_modified_since,
-                                          if_unmodified_since,
-                                          timeout)
-        return _parse_lease_time(response)
-
-    def change_container_lease(
-        self, container_name, lease_id, proposed_lease_id,
-        if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Changes the lease ID for a lock on a container for delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name, 
-                                    _LeaseActions.Change,
-                                    lease_id,
-                                    None, # lease_duration
-                                    None, # lease_break_period
-                                    proposed_lease_id,
-                                    if_modified_since,
-                                    if_unmodified_since,
-                                    timeout)
-
-    def list_blobs(self, container_name, prefix=None, num_results=None, include=None, 
-                   delimiter=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all blobs have been returned or num_results is reached.
-
-        If num_results is specified and the container has more than that number of
-        blobs, the generator will have a populated next_marker field once it
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param ~azure.storage.blob.models.Include include:
-            Specifies one or more additional datasets to include in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
-            result list that acts as a placeholder for all blobs whose names begin
-            with the same substring up to the appearance of the delimiter character.
-            The delimiter may be a single character or a string.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
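-
-        Example (illustrative; ``service`` is assumed to be an instance of
-        this client, and the container and prefix are hypothetical)::
-
-            generator = service.list_blobs('mycontainer', prefix='logs/',
-                                           num_results=100)
-            for blob in generator:
-                print(blob.name)
-            if generator.next_marker:  # more blobs remain beyond num_results
-                generator = service.list_blobs('mycontainer', prefix='logs/',
-                                               marker=generator.next_marker)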
-        '''
-        args = (container_name,)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'delimiter': delimiter, 'timeout': timeout}
-        resp = self._list_blobs(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_blobs, args, kwargs)
-
-    def _list_blobs(self, container_name, prefix=None, marker=None,
-                   max_results=None, include=None, delimiter=None, timeout=None):
-        '''
-        Returns the list of blobs under the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str include:
-            Specifies one or more datasets to include in the
-            response. To specify more than one of these options on the URI,
-            you must separate each option with a comma. Valid values are:
-                snapshots:
-                    Specifies that snapshots should be included in the
-                    enumeration. Snapshots are listed from oldest to newest in
-                    the response.
-                metadata:
-                    Specifies that blob metadata be returned in the response.
-                uncommittedblobs:
-                    Specifies that blobs for which blocks have been uploaded,
-                    but which have not been committed using Put Block List
-                    (REST API), be included in the response.
-                copy:
-                    Version 2012-02-12 and newer. Specifies that metadata
-                    related to any current or previous Copy Blob operation
-                    should be included in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`BlobPrefix` element in the response body that acts as a
-            placeholder for all blobs whose names begin with the same
-            substring up to the appearance of the delimiter character. The
-            delimiter may be a single character or a string.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name)
-        request.query = [
-            ('restype', 'container'),
-            ('comp', 'list'),
-            ('prefix', _to_str(prefix)),
-            ('delimiter', _to_str(delimiter)),
-            ('marker', _to_str(marker)),
-            ('maxresults', _int_to_str(max_results)),
-            ('include', _to_str(include)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_blob_list(response)
-
-    def set_blob_service_properties(
-        self, logging=None, hour_metrics=None, minute_metrics=None,
-        cors=None, target_version=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param Logging logging:
-            Groups the Azure Analytics Logging settings.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list of :class:`CorsRule`
-        :param str target_version:
-            Indicates the default version to use for requests if an incoming 
-            request's version is not specified. 
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
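-
-        Example (an illustrative sketch; it assumes the track1 ``Logging``
-        and ``RetentionPolicy`` models from this package's models module,
-        and ``service`` is an instance of this client)::
-
-            retention = RetentionPolicy(enabled=True, days=7)
-            logging = Logging(delete=True, read=True, write=True,
-                              retention_policy=retention)
-            service.set_blob_service_properties(logging=logging)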
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path() 
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version))
-
-        self._perform_request(request)
-
-    def get_blob_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service properties.
-        :rtype:
-            :class:`~azure.storage.models.ServiceProperties` with an attached
-            target_version property
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path() 
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_service_properties(response.body)
-
-    def get_blob_properties(
-        self, container_name, blob_name, snapshot=None, lease_id=None,
-        if_modified_since=None, if_unmodified_since=None, if_match=None,
-        if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-        Returns :class:`.Blob` with :class:`.BlobProperties` and a metadata dict.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: a blob object including properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
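-
-        Example (illustrative; ``service`` is assumed)::
-
-            blob = service.get_blob_properties('mycontainer', 'myblob')
-            size = blob.properties.content_length
-            user_metadata = blob.metadata  # dict of str -> str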
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('snapshot', _to_str(snapshot)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_blob(blob_name, snapshot, response)
-
-    def set_blob_properties(
-        self, container_name, blob_name, content_settings=None, lease_id=None,
-        if_modified_since=None, if_unmodified_since=None, if_match=None,
-        if_none_match=None, timeout=None):
-        '''
-        Sets system properties on the blob. If any one property is set on the
-        content_settings, all of the content settings will be overridden.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers += [
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-            ('x-ms-lease-id', _to_str(lease_id))
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
-    def exists(self, container_name, blob_name=None, snapshot=None, timeout=None):
-        '''
-        Returns a boolean indicating whether the container exists (if blob_name 
-        is None), or otherwise a boolean indicating whether the blob exists.
-
-        :param str container_name:
-            Name of a container.
-        :param str blob_name:
-            Name of a blob. If None, the container will be checked for existence.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the snapshot.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
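-
-        Example (illustrative; ``service`` is assumed)::
-
-            if service.exists('mycontainer', 'myblob'):
-                blob = service.get_blob_properties('mycontainer', 'myblob')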
-        '''
-        _validate_not_none('container_name', container_name)
-        try:
-            if blob_name is None:
-                self.get_container_properties(container_name, timeout=timeout)
-            else:
-                self.get_blob_properties(container_name, blob_name, snapshot=snapshot, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def _get_blob(
-        self, container_name, blob_name, snapshot=None, start_range=None,
-        end_range=None, range_get_content_md5=None, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Downloads a blob's content, metadata, and properties. You can also
-        call this API to read a snapshot. You can specify a range if you don't
-        need to download the blob in its entirety. If no range is specified,
-        the full blob will be downloaded.
-
-        See get_blob_to_* for high level functions that handle the download
-        of large blobs with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool range_get_content_md5:
-            When this header is set to True and specified together
-            with the Range header, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('snapshot', _to_str(snapshot)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=range_get_content_md5)
-
-        response = self._perform_request(request, None)
-        return _parse_blob(blob_name, snapshot, response)
-
-    def get_blob_to_path(
-        self, container_name, blob_name, file_path, open_mode='wb',
-        snapshot=None, start_range=None, end_range=None,
-        range_get_content_md5=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0, lease_id=None,
-        if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Downloads a blob to a file path, with automatic chunking and progress
-        notifications. Returns an instance of :class:`Blob` with 
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str file_path:
-            Path of file to write out to.
-        :param str open_mode:
-            Mode to use when opening the file.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool range_get_content_md5:
-            When this header is set to True and specified together
-            with the Range header, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Set to 1 to download the blob sequentially.
-            Set to 2 or greater if you want to download a blob larger than 64MB in chunks.
-            If the blob size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
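-
-        Example (illustrative; ``service`` is assumed and the local path is
-        hypothetical)::
-
-            def report(current, total):
-                print('downloaded {} of {} bytes'.format(current, total))
-
-            blob = service.get_blob_to_path('mycontainer', 'myblob',
-                                            '/tmp/myblob.bin',
-                                            progress_callback=report)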
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        with open(file_path, open_mode) as stream:
-            blob = self.get_blob_to_stream(
-                container_name,
-                blob_name,
-                stream,
-                snapshot,
-                start_range,
-                end_range,
-                range_get_content_md5,
-                progress_callback,
-                max_connections,
-                max_retries,
-                retry_wait,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout)
-
-        return blob
-
-    def get_blob_to_stream(
-        self, container_name, blob_name, stream, snapshot=None,
-        start_range=None, end_range=None, range_get_content_md5=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Downloads a blob to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param io.IOBase stream:
-            Opened stream to write to.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool range_get_content_md5:
-            When this header is set to True and specified together
-            with the Range header, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Set to 1 to download the blob sequentially.
-            Set to 2 or greater if you want to download a blob larger than 64MB in chunks.
-            If the blob size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
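-
-        Example (illustrative; ``service`` is assumed and the local path is
-        hypothetical)::
-
-            with open('/tmp/myblob.bin', 'wb') as stream:
-                blob = service.get_blob_to_stream('mycontainer', 'myblob',
-                                                  stream, max_connections=2)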
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        if sys.version_info >= (3,) and max_connections > 1 and not stream.seekable():
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        # Only get properties if parallelism will actually be used
-        blob_size = None
-        if max_connections > 1 and range_get_content_md5 is None:
-            blob = self.get_blob_properties(container_name, blob_name, timeout=timeout)
-            blob_size = blob.properties.content_length
-
-            # If blob size is large, use parallel download
-            if blob_size >= self.MAX_SINGLE_GET_SIZE:
-                _download_blob_chunks(
-                    self,
-                    container_name,
-                    blob_name,
-                    blob_size,
-                    self.MAX_CHUNK_GET_SIZE,
-                    start_range,
-                    end_range,
-                    stream,
-                    max_connections,
-                    max_retries,
-                    retry_wait,
-                    progress_callback,
-                    if_modified_since,
-                    if_unmodified_since,
-                    if_match,
-                    if_none_match,
-                    timeout,
-                )
-                return blob
-
-        # If parallelism is off or the blob is small, do a single download
-        download_size = _get_download_size(start_range, end_range, blob_size)
-        if progress_callback:
-            progress_callback(0, download_size)
-
-        blob = self._get_blob(container_name,
-                              blob_name,
-                              snapshot,
-                              start_range=start_range,
-                              end_range=end_range,
-                              range_get_content_md5=range_get_content_md5,
-                              lease_id=lease_id,
-                              if_modified_since=if_modified_since,
-                              if_unmodified_since=if_unmodified_since,
-                              if_match=if_match,
-                              if_none_match=if_none_match,
-                              timeout=timeout)
-
-        if blob.content is not None:
-            stream.write(blob.content)
-
-        if progress_callback:
-            download_size = len(blob.content) if blob.content is not None else 0
-            progress_callback(download_size, download_size)
-
-        blob.content = None # Clear blob content since output has been written to user stream
-        return blob
-        
-    def get_blob_to_bytes(
-        self, container_name, blob_name, snapshot=None,
-        start_range=None, end_range=None, range_get_content_md5=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None,
-        timeout=None):
-        '''
-        Downloads a blob as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool range_get_content_md5:
-            When this header is set to True and specified together
-            with the Range header, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Set to 1 to download the blob sequentially.
-            Set to 2 or greater if you want to download a blob larger than 64MB in chunks.
-            If the blob size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
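-
-        Example (illustrative; ``service`` is assumed)::
-
-            # start_range/end_range are inclusive: this fetches 512 bytes
-            blob = service.get_blob_to_bytes('mycontainer', 'myblob',
-                                             start_range=0, end_range=511)
-            data = blob.content  # raw bytes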
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        stream = BytesIO()
-        blob = self.get_blob_to_stream(
-            container_name,
-            blob_name,
-            stream,
-            snapshot,
-            start_range,
-            end_range,
-            range_get_content_md5,
-            progress_callback,
-            max_connections,
-            max_retries,
-            retry_wait,
-            lease_id,
-            if_modified_since,
-            if_unmodified_since,
-            if_match,
-            if_none_match,
-            timeout)
-
-        blob.content = stream.getvalue()
-        return blob
-
-    def get_blob_to_text(
-        self, container_name, blob_name, encoding='utf-8', snapshot=None,
-        start_range=None, end_range=None, range_get_content_md5=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None,
-        timeout=None):
-        '''
-        Downloads a blob as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str encoding:
-            Python encoding to use when decoding the blob data.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool range_get_content_md5:
-            When this header is set to True and specified together
-            with the Range header, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Set to 1 to download the blob sequentially.
-            Set to 2 or greater if you want to download a blob larger than 64MB in chunks.
-            If the blob size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
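-
-        Example (illustrative; ``service`` is assumed)::
-
-            blob = service.get_blob_to_text('mycontainer', 'notes.txt')
-            text = blob.content  # decoded using the given encoding (utf-8)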
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('encoding', encoding)
-
-        blob = self.get_blob_to_bytes(container_name,
-                                        blob_name,
-                                        snapshot,
-                                        start_range,
-                                        end_range,
-                                        range_get_content_md5,
-                                        progress_callback,
-                                        max_connections,
-                                        max_retries,
-                                        retry_wait,
-                                        lease_id,
-                                        if_modified_since,
-                                        if_unmodified_since,
-                                        if_match,
-                                        if_none_match,
-                                        timeout)
-        blob.content = blob.content.decode(encoding)
-        return blob
-
-    def get_blob_metadata(
-        self, container_name, blob_name, snapshot=None, lease_id=None,
-        if_modified_since=None, if_unmodified_since=None, if_match=None,
-        if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified blob or snapshot.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the blob metadata name-value pairs.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('snapshot', _to_str(snapshot)),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]     
-
-        response = self._perform_request(request)
-        return _parse_metadata(response)
-
-    def set_blob_metadata(self, container_name, blob_name,
-                          metadata=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None,
-                          if_match=None, if_none_match=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified blob as one or more
-        name-value pairs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: a dict mapping str to str
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
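Taken together, the two metadata methods form a simple read/write pair. A short sketch under the same hypothetical client as above; note that each set_blob_metadata call replaces every existing pair rather than merging::

    # Overwrite the blob's metadata wholesale, then read it back.
    service.set_blob_metadata('mycontainer', 'notes.txt',
                              metadata={'category': 'draft'})
    meta = service.get_blob_metadata('mycontainer', 'notes.txt')
    assert meta['category'] == 'draft'

    # Calling with no metadata argument clears all pairs.
    service.set_blob_metadata('mycontainer', 'notes.txt')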
-
-    def _lease_blob_impl(self, container_name, blob_name,
-                         lease_action, lease_id,
-                         lease_duration, lease_break_period,
-                         proposed_lease_id, if_modified_since,
-                         if_unmodified_since, if_match, if_none_match, timeout=None):
-        '''
-        Establishes and manages a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_action:
-            One of the _LeaseActions values: acquire|renew|release|break|change.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for acquire, required for change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'lease'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-lease-action', _to_str(lease_action)),
-            ('x-ms-lease-duration', _to_str(lease_duration)),
-            ('x-ms-lease-break-period', _to_str(lease_break_period)),
-            ('x-ms-proposed-lease-id', _to_str(proposed_lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-
-        return self._perform_request(request)
-
-    def acquire_blob_lease(self, container_name, blob_name,
-                           lease_duration=-1,
-                           proposed_lease_id=None,
-                           if_modified_since=None,
-                           if_unmodified_since=None,
-                           if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Acquires a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-
-        if lease_duration != -1 and\
-           (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-        response = self._lease_blob_impl(container_name,
-                                     blob_name,
-                                     _LeaseActions.Acquire,
-                                     None, # lease_id
-                                     lease_duration,
-                                     None, # lease_break_period
-                                     proposed_lease_id,
-                                     if_modified_since,
-                                     if_unmodified_since,
-                                     if_match,
-                                     if_none_match,
-                                     timeout)
-        return _parse_lease_id(response)
-
-    def renew_blob_lease(self, container_name, blob_name,
-                         lease_id, if_modified_since=None,
-                         if_unmodified_since=None, if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Renews a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        response = self._lease_blob_impl(container_name,
-                                            blob_name,
-                                            _LeaseActions.Renew,
-                                            lease_id,
-                                            None, # lease_duration
-                                            None, # lease_break_period
-                                            None, # proposed_lease_id
-                                            if_modified_since,
-                                            if_unmodified_since,
-                                            if_match,
-                                            if_none_match,
-                                            timeout)
-        return _parse_lease_id(response)
-
-    def release_blob_lease(self, container_name, blob_name,
-                           lease_id, if_modified_since=None,
-                           if_unmodified_since=None, if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Releases a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_blob_impl(container_name,
-                                blob_name,
-                                _LeaseActions.Release,
-                                lease_id,
-                                None, # lease_duration
-                                None, # lease_break_period
-                                None, # proposed_lease_id
-                                if_modified_since,
-                                if_unmodified_since,
-                                if_match,
-                                if_none_match,
-                                timeout)
-
-    def break_blob_lease(self, container_name, blob_name,
-                         lease_break_period=None,
-                         if_modified_since=None,
-                         if_unmodified_since=None,
-                         if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Breaks a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        response = self._lease_blob_impl(container_name,
-                                     blob_name,
-                                     _LeaseActions.Break,
-                                     None, # lease_id
-                                     None, # lease_duration
-                                     lease_break_period,
-                                     None, # proposed_lease_id
-                                     if_modified_since,
-                                     if_unmodified_since,
-                                     if_match,
-                                     if_none_match,
-                                     timeout)
-        return _parse_lease_time(response)
-
-    def change_blob_lease(self, container_name, blob_name,
-                         lease_id,
-                         proposed_lease_id,
-                         if_modified_since=None,
-                         if_unmodified_since=None,
-                         if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Changes a lock on a blob for write and delete operations.
-        The lock duration can be 15 to 60 seconds, or can be infinite.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        self._lease_blob_impl(container_name,
-                                blob_name,
-                                _LeaseActions.Change,
-                                lease_id,
-                                None, # lease_duration
-                                None, # lease_break_period
-                                proposed_lease_id,
-                                if_modified_since,
-                                if_unmodified_since,
-                                if_match,
-                                if_none_match,
-                                timeout)
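The five public lease methods above all funnel into _lease_blob_impl with a different x-ms-lease-action. A hedged end-to-end sketch of the lifecycle they supported, with illustrative names and IDs::

    # Acquire a 30-second lease; writes now require the lease ID.
    lease_id = service.acquire_blob_lease('mycontainer', 'notes.txt',
                                          lease_duration=30)
    service.set_blob_metadata('mycontainer', 'notes.txt',
                              metadata={'locked': 'yes'},
                              lease_id=lease_id)

    # Keep it alive, hand it over to a proposed ID, then release it.
    service.renew_blob_lease('mycontainer', 'notes.txt', lease_id)
    new_id = '11111111-2222-3333-4444-555555555555'
    service.change_blob_lease('mycontainer', 'notes.txt', lease_id, new_id)
    service.release_blob_lease('mycontainer', 'notes.txt', new_id)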
-
-    def snapshot_blob(self, container_name, blob_name,
-                      metadata=None, if_modified_since=None,
-                      if_unmodified_since=None, if_match=None,
-                      if_none_match=None, lease_id=None, timeout=None):
-        '''
-        Creates a read-only snapshot of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Specifies user-defined name-value pairs associated with the blob.
-            If no name-value pairs are specified, the operation will copy the
-            base blob metadata to the snapshot. If one or more name-value pairs
-            are specified, the snapshot is created with the specified metadata,
-            and metadata is not copied from the base blob.
-        :type metadata: a dict mapping str to str
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'snapshot'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-            ('x-ms-lease-id', _to_str(lease_id))
-        ]
-
-        response = self._perform_request(request)
-        return _parse_snapshot_blob(blob_name, response)
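A small usage sketch for snapshots, assuming (as in the upstream track1 models) that the returned Blob exposes the opaque snapshot timestamp as its snapshot attribute::

    snap = service.snapshot_blob('mycontainer', 'notes.txt')

    # The snapshot value addresses the frozen copy in later reads.
    old_meta = service.get_blob_metadata('mycontainer', 'notes.txt',
                                         snapshot=snap.snapshot)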
-
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None):
-        '''
-        Copies a blob to a destination within the storage account.
-        The source for a Copy Blob operation can be a committed blob 
-        or an Azure file in any Azure storage account.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str copy_source:
-            URL up to 2 KB in length that specifies a blob. A source blob in
-            the same account can be private, but a blob in another account
-            must be public or accept credentials included in this URL, such as
-            a Shared Access Signature. Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-        :param metadata:
-            Dict containing name and value pairs.
-        :type metadata: a dict mapping str to str
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.  
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source', copy_source)
-
-        if copy_source.startswith('/'):
-            # Backwards compatibility for earlier versions of the SDK where
-            # the copy source can be in the following formats:
-            # - Blob in named container:
-            #     /accountName/containerName/blobName
-            # - Snapshot in named container:
-            #     /accountName/containerName/blobName?snapshot=<DateTime>
-            # - Blob in root container:
-            #     /accountName/blobName
-            # - Snapshot in root container:
-            #     /accountName/blobName?snapshot=<DateTime>
-            account, _, source =\
-                copy_source.partition('/')[2].partition('/')
-            copy_source = self.protocol + '://' + \
-                self.primary_endpoint + '/' + source
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-copy-source', _to_str(copy_source)),
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-source-if-modified-since',
-             _to_str(source_if_modified_since)),
-            ('x-ms-source-if-unmodified-since',
-             _to_str(source_if_unmodified_since)),
-            ('x-ms-source-if-match', _to_str(source_if_match)),
-            ('x-ms-source-if-none-match',
-             _to_str(source_if_none_match)),
-            ('If-Modified-Since', _datetime_to_utc_string(destination_if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(destination_if_unmodified_since)),
-            ('If-Match', _to_str(destination_if_match)),
-            ('If-None-Match', _to_str(destination_if_none_match)),
-            ('x-ms-lease-id', _to_str(destination_lease_id)),
-            ('x-ms-source-lease-id', _to_str(source_lease_id))
-        ]
-
-        response = self._perform_request(request)
-        props = _parse_properties(response, BlobProperties)
-        return props.copy
-
-    def abort_copy_blob(self, container_name, blob_name, copy_id,
-                        lease_id=None, timeout=None):
-        '''
-        Aborts a pending copy_blob operation, and leaves a destination blob
-        with zero length and full metadata.
-
-        :param str container_name:
-            Name of destination container.
-        :param str blob_name:
-            Name of destination blob.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_blob operation.
-        :param str lease_id:
-            Required if the destination blob has an active infinite lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'copy'),
-            ('copyid', _to_str(copy_id)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-copy-action', 'abort'),
-        ]
-
-        self._perform_request(request)
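copy_blob and abort_copy_blob pair up around the asynchronous server-side copy. A sketch assuming the CopyProperties attributes status and id from the upstream track1 models::

    source = ('https://myaccount.blob.core.windows.net'
              '/mycontainer/notes.txt')
    copy = service.copy_blob('mycontainer', 'notes-copy.txt', source)

    # Cross-account copies can stay 'pending' for some time; a caller
    # could cancel one that has not completed (illustrative only).
    if copy.status == 'pending':
        service.abort_copy_blob('mycontainer', 'notes-copy.txt', copy.id)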
-
-    def delete_blob(self, container_name, blob_name, snapshot=None,
-                    lease_id=None, delete_snapshots=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Marks the specified blob or snapshot for deletion.
-        The blob is later deleted during garbage collection.
-
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the Delete
-        Blob operation.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to delete.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param delete_snapshots:
-            Required if the blob has associated snapshots.
-        :type delete_snapshots: 
-            One of the values listed in the :class:`~azure.storage.blob.models.DeleteSnapshot` enum.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-delete-snapshots', _to_str(delete_snapshots)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-        request.query = [
-            ('snapshot', _to_str(snapshot)),
-            ('timeout', _int_to_str(timeout))
-        ]
-
-        self._perform_request(request)
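Because a blob with snapshots cannot be deleted on its own, callers passed a DeleteSnapshot value. A minimal sketch, assuming the models module exported the enum as the upstream track1 SDK did::

    from azure.multiapi.storage.v2015_04_05.blob.models import DeleteSnapshot

    # Remove the blob together with all of its snapshots; use
    # DeleteSnapshot.Only to drop snapshots but keep the base blob.
    service.delete_blob('mycontainer', 'notes.txt',
                        delete_snapshots=DeleteSnapshot.Include)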
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/blockblobservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/blockblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/blockblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/blockblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,786 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-)
-from .._common_conversion import (
-    _encode_base64,
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-)
-from .._serialization import (
-    _get_request_body,
-    _get_request_body_bytes_only,
-)
-from .._http import HTTPRequest
-from ._chunking import (
-    _BlockBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .models import (
-    _BlobTypes,
-)
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ._serialization import (
-    _convert_block_list_to_xml,
-    _get_path,
-)
-from ._deserialization import (
-    _convert_xml_to_block_list,
-    _parse_base_properties,
-)
-from .baseblobservice import BaseBlobService
-from os import path
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class BlockBlobService(BaseBlobService):
-    '''
-    Block blobs let you upload large blobs efficiently. Block blobs are composed
-    of blocks, each of which is identified by a block ID. You create or modify a
-    block blob by writing a set of blocks and committing them by their block IDs.
-    Each block can be a different size, up to a maximum of 4 MB, and a block blob
-    can include up to 50,000 blocks. The maximum size of a block blob is therefore
-    slightly more than 195 GB (4 MB X 50,000 blocks). If you are writing a block
-    blob that is no more than 64 MB in size, you can upload it in its entirety with
-    a single write operation; see create_blob_from_bytes. 
-    '''
-
-    MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        self.blob_type = _BlobTypes.BlockBlob
-        super(BlockBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, 
-            custom_domain, request_session, connection_string)
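A brief construction sketch covering the main authentication paths the docstring describes; every value here is hypothetical::

    # Shared key auth; a sas_token or connection_string would work
    # too, and is_emulated=True targets the local storage emulator.
    service = BlockBlobService(account_name='myaccount',
                               account_key='<base64-account-key>')

    # Anonymous access against a custom domain needs no credentials.
    public = BlockBlobService(custom_domain='www.mydomain.com')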
-
-    def _put_blob(self, container_name, blob_name, blob, content_settings=None,
-                  metadata=None, lease_id=None, if_modified_since=None,
-                  if_unmodified_since=None, if_match=None,
-                  if_none_match=None, timeout=None):
-        '''
-        Creates a blob or updates an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as bytes (size < 64MB). For larger sizes, you
-            must call put_block and put_block_list to set the content of the blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the new Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-blob-type', _to_str(self.blob_type)),
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-        request.body = _get_request_body_bytes_only('blob', blob)
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
-    def put_block(self, container_name, blob_name, block, block_id,
-                  content_md5=None, lease_id=None, timeout=None):
-        '''
-        Creates a new block to be committed as part of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes block:
-            Content of the block.
-        :param str block_id:
-            A valid Base64 string value that identifies the block. Prior to
-            encoding, the string must be less than or equal to 64 bytes in size.
-            For a given blob, the length of the value specified for the blockid
-            parameter must be the same size for each block. Note that the Base64
-            string must be URL-encoded.
-        :param str content_md5:
-            An MD5 hash of the block content. This hash is used to
-            verify the integrity of the blob during transport. When this
-            header is specified, the storage service checks the hash that has
-            arrived with the one that was sent.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_not_none('block_id', block_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'block'),
-            ('blockid', _encode_base64(_to_str(block_id))),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('Content-MD5', _to_str(content_md5)),
-            ('x-ms-lease-id', _to_str(lease_id))
-        ]
-        request.body = _get_request_body_bytes_only('block', block)
-
-        self._perform_request(request)
-
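Chunked uploads tie put_block (above) to put_block_list (next): stage blocks, then commit their IDs. A sketch under the class limits quoted earlier, assuming BlobBlock comes from the models module as in the upstream track1 SDK::

    from azure.multiapi.storage.v2015_04_05.blob.models import BlobBlock

    data = b'x' * (10 * 1024 * 1024)        # 10 MB payload
    block_size = 4 * 1024 * 1024            # MAX_BLOCK_SIZE
    blocks = []
    for offset in range(0, len(data), block_size):
        block_id = '{:08d}'.format(offset)  # IDs must share one length
        service.put_block('mycontainer', 'big.bin',
                          data[offset:offset + block_size], block_id)
        blocks.append(BlobBlock(id=block_id))

    # Nothing is readable until the block list is committed.
    service.put_block_list('mycontainer', 'big.bin', blocks)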
-    def put_block_list(
-        self, container_name, blob_name, block_list,
-        transactional_content_md5=None, content_settings=None,
-        metadata=None, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, 
-        timeout=None):
-        '''
-        Writes a blob by specifying the list of block IDs that make up the blob.
-        In order to be written as part of a blob, a block must have been
-        successfully written to the server in a prior Put Block operation.
-
-        You can call Put Block List to update a blob by uploading only those
-        blocks that have changed, then committing the new and existing blocks
-        together. You can do this by specifying whether to commit a block from
-        the committed block list or from the uncommitted block list, or to commit
-        the most recently uploaded version of the block, whichever list it may
-        belong to.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block_list:
-            A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block ids and block state.
-        :type block_list: list of :class:`~azure.storage.blob.models.BlobBlock`
-        :param str transactional_content_md5:
-            An MD5 hash of the block content. This hash is used to
-            verify the integrity of the blob during transport. When this header
-            is specified, the storage service checks the hash that has arrived
-            with the one that was sent.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block_list', block_list)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'blocklist'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('Content-MD5', _to_str(transactional_content_md5)),
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-        request.body = _get_request_body(
-            _convert_block_list_to_xml(block_list))
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
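-    # Usage sketch (hypothetical names, continuing the put_block sketch):
-    # commit the staged blocks in order. BlobBlock defaults to the 'Latest'
-    # state, so the most recently uploaded version of each id is committed.
-    #
-    #   blocks = [BlobBlock(id='block-000'), BlobBlock(id='block-001')]
-    #   props = service.put_block_list('mycontainer', 'myblob', blocks)
-    #   print(props.etag, props.last_modified)
-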
-    def get_block_list(self, container_name, blob_name, snapshot=None,
-                       block_list_type=None, lease_id=None, timeout=None):
-        '''
-        Retrieves the list of blocks that have been uploaded as part of a
-        block blob. There are two block lists maintained for a blob:
-            Committed Block List:
-                The list of blocks that have been successfully committed to a
-                given blob with Put Block List.
-            Uncommitted Block List:
-                The list of blocks that have been uploaded for a blob using
-                Put Block, but that have not yet been committed. These blocks
-                are stored in Azure in association with a blob, but do not yet
-                form part of the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            An opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
-        :param str block_list_type:
-            Specifies whether to return the list of committed blocks, the list
-            of uncommitted blocks, or both lists together. Valid values are:
-            committed, uncommitted, or all.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: list committed and/or uncommitted blocks for Block Blob
-        :rtype: :class:`~azure.storage.blob.models.BlobBlockList`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'blocklist'),
-            ('snapshot', _to_str(snapshot)),
-            ('blocklisttype', _to_str(block_list_type)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-lease-id', _to_str(lease_id))]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_block_list(response)
-
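-    # Usage sketch (hypothetical names): inspect both block lists. Blocks
-    # staged with put_block appear under uncommitted_blocks until they are
-    # committed with put_block_list.
-    #
-    #   block_list = service.get_block_list(
-    #       'mycontainer', 'myblob', block_list_type=BlockListType.All)
-    #   for block in block_list.committed_blocks:
-    #       print(block.id, block.size)
-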
-    #----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-        self, container_name, blob_name, file_path, content_settings=None,
-        metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                lease_id=lease_id,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                max_retries=max_retries,
-                retry_wait=retry_wait,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
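-    # Usage sketch (hypothetical names): upload a local file with chunked
-    # parallel transfer and progress reporting. max_connections only takes
-    # effect once the blob exceeds the single-put threshold.
-    #
-    #   def progress(current, total):
-    #       print('{0} of {1} bytes uploaded'.format(current, total))
-    #
-    #   service.create_blob_from_path(
-    #       'mycontainer', 'myblob', '/tmp/data.bin',
-    #       max_connections=4, progress_callback=progress)
-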
-    def create_blob_from_stream(
-        self, container_name, blob_name, stream, count=None,
-        content_settings=None, metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file/stream, or updates the content of
-        an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-            Note that parallel upload requires the stream to be seekable.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        if count and count < self.MAX_SINGLE_PUT_SIZE:
-            if progress_callback:
-                progress_callback(0, count)
-
-            data = stream.read(count)
-            self._put_blob(
-                container_name=container_name,
-                blob_name=blob_name,
-                blob=data,
-                content_settings=content_settings,
-                metadata=metadata,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
-            if progress_callback:
-                progress_callback(count, count)
-        else:
-            block_ids = _upload_blob_chunks(
-                blob_service=self,
-                container_name=container_name,
-                blob_name=blob_name,
-                blob_size=count,
-                block_size=self.MAX_BLOCK_SIZE,
-                stream=stream,
-                max_connections=max_connections,
-                max_retries=max_retries,
-                retry_wait=retry_wait,
-                progress_callback=progress_callback,
-                lease_id=lease_id,
-                uploader_class=_BlockBlobChunkUploader,
-                timeout=timeout
-            )
-
-            self.put_block_list(
-                container_name=container_name,
-                blob_name=blob_name,
-                block_list=block_ids,
-                content_settings=content_settings,
-                metadata=metadata,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout
-            )
-
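-    # Usage sketch (hypothetical names): streams smaller than
-    # MAX_SINGLE_PUT_SIZE go up in a single Put Blob request; anything
-    # larger (or of unknown size) is split into MAX_BLOCK_SIZE chunks,
-    # staged with Put Block, and committed with Put Block List.
-    #
-    #   with open('/tmp/data.bin', 'rb') as stream:
-    #       service.create_blob_from_stream(
-    #           'mycontainer', 'myblob', stream,
-    #           count=1024, max_connections=2)
-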
-    def create_blob_from_bytes(
-        self, container_name, blob_name, blob, index=0, count=None,
-        content_settings=None, metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
-
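-    # Usage sketch (hypothetical names): upload a slice of an in-memory
-    # buffer. index and count select the region; omitting count uploads
-    # everything from index onwards.
-    #
-    #   data = b'x' * 4096
-    #   service.create_blob_from_bytes(
-    #       'mycontainer', 'myblob', data, index=1024, count=2048)
-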
-    def create_blob_from_text(
-        self, container_name, blob_name, text, encoding='utf-8',
-        content_settings=None, metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from str/unicode, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.create_blob_from_bytes(
-            container_name=container_name,
-            blob_name=blob_name,
-            blob=text,
-            index=0,
-            count=len(text),
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,667 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._common_conversion import _to_str
-class Container(object):
-
-    '''
-    Blob container class. 
-    
-    :ivar str name: 
-        The name of the container.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the container as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list containers operation. If this parameter was specified but the 
-        container has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict mapping str to str
-    :ivar ContainerProperties properties:
-        System properties for the container.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or ContainerProperties()
-        self.metadata = metadata
-
-
-class ContainerProperties(object):
-
-    '''
-    Blob container's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the container was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar LeaseProperties lease:
-        Stores all the lease information for the container.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.lease = LeaseProperties()
-
-
-class Blob(object):
-
-    '''
-    Blob class.
-    
-    :ivar str name:
-        Name of blob.
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    :ivar content:
-        Blob content.
-    :vartype content: str or bytes
-    :ivar BlobProperties properties:
-        Stores all the system properties for the blob.
-    :ivar metadata:
-        Name-value pairs associated with the blob as metadata.
-    '''
-    def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.snapshot = snapshot
-        self.content = content
-        self.properties = props or BlobProperties()
-        self.metadata = metadata
-
-
-class BlobProperties(object):
-
-    '''
-    Blob Properties
-    
-    :ivar str blob_type:
-        String indicating this blob's type.
-    :ivar datetime last_modified:
-        A datetime object representing the last time the blob was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        Length of blob in bytes.
-    :ivar int append_blob_committed_block_count:
-        (For Append Blobs) Number of committed blocks in the blob.
-    :ivar int page_blob_sequence_number:
-        (For Page Blobs) Sequence number for page blob used for coordinating
-        concurrent writes.
-    :ivar ~azure.storage.blob.models.CopyProperties copy:
-        Stores all the copy properties for the blob.
-    :ivar ~azure.storage.blob.models.ContentSettings content_settings:
-        Stores all the content settings for the blob.
-    :ivar LeaseProperties lease:
-        Stores all the lease information for the blob.
-    '''
-
-    def __init__(self):
-        self.blob_type = None
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.append_blob_committed_block_count = None
-        self.page_blob_sequence_number = None
-        self.copy = CopyProperties()
-        self.content_settings = ContentSettings()
-        self.lease = LeaseProperties()
-
-
-class ContentSettings(object):
-
-    '''
-    Used to store the content settings of a blob.
-    
-    :ivar str content_type:
-        The content type specified for the blob. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If the content_encoding has previously been set
-        for the blob, that value is stored.
-    :ivar str content_language:
-        If the content_language has previously been set
-        for the blob, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the blob, that value is stored.
-    :ivar str cache_control:
-        If the cache_control has previously been set for
-        the blob, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the blob, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-        self, content_type=None, content_encoding=None,
-        content_language=None, content_disposition=None,
-        cache_control=None, content_md5=None):
-        
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return [
-            ('x-ms-blob-cache-control', _to_str(self.cache_control)),
-            ('x-ms-blob-content-type', _to_str(self.content_type)),
-            ('x-ms-blob-content-disposition',
-                _to_str(self.content_disposition)),
-            ('x-ms-blob-content-md5', _to_str(self.content_md5)),
-            ('x-ms-blob-content-encoding',
-                _to_str(self.content_encoding)),
-            ('x-ms-blob-content-language',
-                _to_str(self.content_language)),
-        ]
-
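-# Usage sketch (hypothetical names): ContentSettings maps directly onto the
-# x-ms-blob-content-* headers above; pass it to a create_blob_from_* call to
-# control how clients interpret the downloaded blob.
-#
-#   settings = ContentSettings(content_type='application/json',
-#                              cache_control='max-age=3600')
-#   service.create_blob_from_text('mycontainer', 'config.json', '{}',
-#                                 content_settings=settings)
-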
-
-class CopyProperties(object):
-    '''
-    Blob Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy Blob operation where this blob
-        was the destination blob. This header does not appear if this blob has never
-        been the destination in a Copy Blob operation, or if this blob has been
-        modified after a concluded Copy Blob operation using Set Blob Properties,
-        Put Blob, or Put Block List.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source blob used in the last attempted
-        Copy Blob operation where this blob was the destination blob. This header does not
-        appear if this blob has never been the destination in a Copy Blob operation, or if
-        this blob has been modified after a concluded Copy Blob operation using
-        Set Blob Properties, Put Blob, or Put Block List.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check status_description if intermittent,
-                non-fatal errors impede copy progress but don’t cause failure.
-            aborted:
-                Copy was ended by Abort Copy Blob.
-            failed:
-                Copy failed. See status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy Blob operation where this blob was the destination blob. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy Blob operation where this blob was the
-        destination blob. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes the
-        cause of a fatal or non-fatal copy operation failure.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class LeaseProperties(object):
-
-    '''
-    Blob Lease Properties.
-    
-    :ivar str status:
-        The lease status of the blob.
-    :ivar str state:
-        Lease state of the blob.
-        Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a blob is leased, specifies whether the lease is of infinite or fixed duration.
-    '''
-
-    def __init__(self):
-        self.status = None
-        self.state = None
-        self.duration = None
-
-
-class BlobPrefix(object):
-    '''
-    BlobPrefix objects may potentially be returned in the blob list when
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is 
-    used with a delimiter. Prefixes can be thought of as virtual blob directories.
-    
-    :ivar str name: The name of the blob prefix.
-    '''
-
-    def __init__(self):
-        self.name = None
-
-
-class BlobBlockState(object):
-    '''Block blob block types.'''
-
-    Committed = 'Committed'
-    '''Committed blocks.'''
-
-    Latest = 'Latest'
-    '''Latest blocks.'''
-
-    Uncommitted = 'Uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class BlobBlock(object):
-
-    '''
-    BlockBlob Block class.
-    
-    :ivar str id:
-        Block id.
-    :ivar str state:
-        Block state.
-        Possible values: committed|uncommitted|latest
-    :ivar int size:
-        Block size in bytes.
-    '''
-
-    def __init__(self, id=None, state=BlobBlockState.Latest):
-        self.id = id
-        self.state = state
-
-    def _set_size(self, size):
-        self.size = size
-
-
-class BlobBlockList(object):
-
-    '''
-    Blob Block List class.
-   
-    :ivar committed_blocks:
-        List of committed blocks.
-    :vartype committed_blocks: list of :class:`BlobBlock`
-    :ivar uncommitted_blocks:
-        List of uncommitted blocks.
-    :vartype uncommitted_blocks: list of :class:`BlobBlock`
-    '''
-
-    def __init__(self):
-        self.committed_blocks = list()
-        self.uncommitted_blocks = list()
-
-class PageRange(object):
-
-    '''
-    Page Range for page blob.
-    
-    :ivar int start:
-        Start of page range in bytes.
-    :ivar int end:
-        End of page range in bytes.
-    '''
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-class ResourceProperties(object):
-
-    '''
-    Base response for a resource request.
-    
-    :ivar str etag:
-        Opaque etag value that can be used to check if resource
-        has been modified.
-    :ivar datetime last_modified:
-        Datetime for last time resource was modified.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-
-class AppendBlockProperties(ResourceProperties):
-
-    '''
-    Response for an append block request.
-    
-    :ivar int append_offset:
-        Position to start next append.
-    :ivar int committed_block_count:
-        Number of committed append blocks.
-    '''
-
-    def __init__(self):
-        super(AppendBlockProperties, self).__init__()
-        self.append_offset = None
-        self.committed_block_count = None
-
-
-class PageBlobProperties(ResourceProperties):
-
-    '''
-    Response for a page request.
-    
-    :ivar int sequence_number:
-        Identifier for page blobs to help handle concurrent writes.
-    '''
-
-    def __init__(self):
-        super(PageBlobProperties, self).__init__()
-        self.sequence_number = None
-
-
-class PublicAccess(object):
-    '''
-    Specifies whether data in the container may be accessed publicly and the level of access.
-    '''
-
-    Blob = 'blob'
-    '''
-    Specifies public read access for blobs. Blob data within this container can be read 
-    via anonymous request, but container data is not available. Clients cannot enumerate 
-    blobs within the container via anonymous request.
-    '''
-
-    Container = 'container'
-    '''
-    Specifies full public read access for container and blob data. Clients can enumerate 
-    blobs within the container via anonymous request, but cannot enumerate containers 
-    within the storage account.
-    '''
-
-class DeleteSnapshot(object):
-    '''
-    Required if the blob has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the base blob and all of its snapshots.
-    '''
-
-    Only = 'only'
-    '''
-    Delete only the blob's snapshots and not the blob itself.
-    '''
-
-class BlockListType(object):
-    '''
-    Specifies whether to return the list of committed blocks, the list of uncommitted 
-    blocks, or both lists together.
-    '''
-
-    All = 'all'
-    '''Both committed and uncommitted blocks.'''
-
-    Committed = 'committed'
-    '''Committed blocks.'''
-
-    Uncommitted = 'uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class SequenceNumberAction(object):
-    '''Sequence number actions.'''
-
-    Increment = 'increment'
-    '''
-    Increments the value of the sequence number by 1. If specifying this option, 
-    do not include the x-ms-blob-sequence-number header.
-    '''
-
-    Max = 'max'
-    '''
-    Sets the sequence number to be the higher of the value included with the 
-    request and the value currently stored for the blob.
-    '''
-
-    Update = 'update'
-    '''Sets the sequence number to the value included with the request.'''
-
-
-class _LeaseActions(object):
-    '''Actions for a lease.'''
-
-    Acquire = 'acquire'
-    '''Acquire the lease.'''
-
-    Break = 'break'
-    '''Break the lease.'''
-
-    Change = 'change'
-    '''Change the lease ID.'''
-
-    Release = 'release'
-    '''Release the lease.'''
-
-    Renew = 'renew'
-    '''Renew the lease.'''
-
-class _BlobTypes(object):
-    '''Blob type options.'''
-
-    AppendBlob = 'AppendBlob'
-    '''Append blob type.'''
-
-    BlockBlob = 'BlockBlob'
-    '''Block blob type.'''
-
-    PageBlob = 'PageBlob'
-    '''Page blob type.'''
-
-class Include(object):
-
-    '''
-    Specifies the datasets to include in the blob list response.
-
-    :ivar ~azure.storage.blob.models.Include Include.COPY: 
-        Specifies that metadata related to any current or previous Copy Blob operation 
-        should be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.METADATA: 
-        Specifies that metadata be returned in the response.
-    :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: 
-        Specifies that snapshots should be included in the enumeration.
-    :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: 
-        Specifies that blobs for which blocks have been uploaded, but which have not 
-        been committed using Put Block List, be included in the response.
-    '''
-
-    def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False, 
-                 copy=False, _str=None):
-        '''
-        :param bool snapshots:
-             Specifies that snapshots should be included in the enumeration.
-        :param bool metadata:
-            Specifies that metadata be returned in the response.
-        :param bool uncommitted_blobs:
-            Specifies that blobs for which blocks have been uploaded, but which have 
-            not been committed using Put Block List, be included in the response.
-        :param bool copy: 
-            Specifies that metadata related to any current or previous Copy Blob 
-            operation should be included in the response. 
-        :param str _str: 
-            A string representing the includes.
-        '''
-        if not _str:
-            _str = ''
-        components = _str.split(',')
-        self.snapshots = snapshots or ('snapshots' in components)
-        self.metadata = metadata or ('metadata' in components)
-        self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
-        self.copy = copy or ('copy' in components)
-    
-    def __or__(self, other):
-        return Include(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Include(_str=str(self) + str(other))
-    
-    def __str__(self):
-        include = (('snapshots,' if self.snapshots else '') + 
-                   ('metadata,' if self.metadata else '') +
-                   ('uncommittedblobs,' if self.uncommitted_blobs else '') +
-                   ('copy,' if self.copy else ''))
-        return include.rstrip(',')
-
-Include.COPY = Include(copy=True)
-Include.METADATA = Include(metadata=True)
-Include.SNAPSHOTS = Include(snapshots=True)
-Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
-
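-# Usage sketch: the predefined Include instances compose with | or +, so a
-# listing that needs several datasets can be requested in one expression
-# (assumes a hypothetical blob service instance `service`).
-#
-#   include = Include.METADATA | Include.SNAPSHOTS
-#   str(include)   # 'snapshots,metadata'
-#   blobs = service.list_blobs('mycontainer', include=include)
-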
-
-class BlobPermissions(object):
-
-    '''
-    BlobPermissions class to be used with 
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
-
-    :ivar BlobPermissions BlobPermissions.ADD:
-        Add a block to an append blob.
-    :ivar BlobPermissions BlobPermissions.CREATE:
-        Write a new blob, snapshot a blob, or copy a blob to a new blob.
-    :ivar BlobPermissions BlobPermissions.DELETE:
-        Delete the blob.
-    :ivar BlobPermissions BlobPermissions.READ:
-        Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
-    :ivar BlobPermissions BlobPermissions.WRITE:
-        Create or write content, properties, metadata, or block list. Snapshot or lease 
-        the blob. Resize the blob (page blob only). Use the blob as the destination of a 
-        copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, add=False, create=False, write=False, 
-                 delete=False, _str=None):
-        '''    
-        :param bool read:
-            Read the content, properties, metadata and block list. Use the blob as 
-            the source of a copy operation.
-        :param bool add:
-            Add a block to an append blob.
-        :param bool create:
-            Write a new blob, snapshot a blob, or copy a blob to a new blob.
-        :param bool write: 
-            Create or write content, properties, metadata, or block list. Snapshot 
-            or lease the blob. Resize the blob (page blob only). Use the blob as the 
-            destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the blob.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-    
-    def __or__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-BlobPermissions.ADD = BlobPermissions(add=True)
-BlobPermissions.CREATE = BlobPermissions(create=True)
-BlobPermissions.DELETE = BlobPermissions(delete=True)
-BlobPermissions.READ = BlobPermissions(read=True)
-BlobPermissions.WRITE = BlobPermissions(write=True)
-
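-# Usage sketch: permissions compose the same way, e.g. to build a read/write
-# blob SAS (hypothetical names; assumes a blob service instance `service`
-# and `from datetime import datetime, timedelta`).
-#
-#   perms = BlobPermissions.READ | BlobPermissions.WRITE
-#   str(perms)   # 'rw'
-#   token = service.generate_blob_shared_access_signature(
-#       'mycontainer', 'myblob', permission=perms,
-#       expiry=datetime.utcnow() + timedelta(hours=1))
-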
-
-class ContainerPermissions(object):
-
-    '''
-    ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
-    API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. 
-
-    :ivar ContainerPermissions ContainerPermissions.DELETE:
-        Delete any blob in the container. Note: You cannot grant permissions to 
-        delete a container with a container SAS. Use an account SAS instead.
-    :ivar ContainerPermissions ContainerPermissions.LIST:
-        List blobs in the container.
-    :ivar ContainerPermissions ContainerPermissions.READ:
-        Read the content, properties, metadata or block list of any blob in the 
-        container. Use any blob in the container as the source of a copy operation.
-    :ivar ContainerPermissions ContainerPermissions.WRITE:
-        For any blob in the container, create or write content, properties, 
-        metadata, or block list. Snapshot or lease the blob. Resize the blob 
-        (page blob only). Use the blob as the destination of a copy operation 
-        within the same account. Note: You cannot grant permissions to read or 
-        write container properties or metadata, nor to lease a container, with 
-        a container SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False, 
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata or block list of any blob in the 
-            container. Use any blob in the container as the source of a copy operation.
-        :param bool write: 
-            For any blob in the container, create or write content, properties, 
-            metadata, or block list. Snapshot or lease the blob. Resize the blob 
-            (page blob only). Use the blob as the destination of a copy operation 
-            within the same account. Note: You cannot grant permissions to read or 
-            write container properties or metadata, nor to lease a container, with 
-            a container SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any blob in the container. Note: You cannot grant permissions to 
-            delete a container with a container SAS. Use an account SAS instead.
-        :param bool list: 
-            List blobs in the container.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-    
-    def __or__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') + 
-                ('l' if self.list else ''))
-
-ContainerPermissions.DELETE = ContainerPermissions(delete=True)
-ContainerPermissions.LIST = ContainerPermissions(list=True)
-ContainerPermissions.READ = ContainerPermissions(read=True)
-ContainerPermissions.WRITE = ContainerPermissions(write=True)
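-
-# Usage sketch: ContainerPermissions combines identically, both for container
-# SAS tokens and for the AccessPolicy objects passed to set_container_acl.
-# Deleting the container itself still requires an account SAS.
-#
-#   perms = ContainerPermissions.READ + ContainerPermissions.LIST   # 'rl'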
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/pageblobservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/pageblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/blob/pageblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/blob/pageblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,929 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-)
-from .._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-)
-from .._serialization import (
-    _get_request_body_bytes_only,
-)
-from .._http import HTTPRequest
-from ._error import (
-    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
-)
-from ._chunking import (
-    _PageBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .models import (
-    _BlobTypes,
-    PageBlobProperties,
-)
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_page_ranges,
-    _parse_page_properties,
-    _parse_base_properties,
-)
-from .baseblobservice import BaseBlobService
-from os import path
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
-_PAGE_ALIGNMENT = 512
-
-
-class PageBlobService(BaseBlobService):
-    '''
-    Page blobs are a collection of 512-byte pages optimized for random read and
-    write operations. To create a page blob, you initialize the page blob and
-    specify the maximum size the page blob will grow. To add or update the
-    contents of a page blob, you write a page or pages by specifying an offset
-    and a range that align to 512-byte page boundaries. A write to a page blob
-    can overwrite just one page, some pages, or up to 4 MB of the page blob.
-    Writes to page blobs happen in-place and are immediately committed to the
-    blob. The maximum size for a page blob is 1 TB.
-    '''
-
-    MAX_PAGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        self.blob_type = _BlobTypes.PageBlob
-        super(PageBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, 
-            custom_domain, request_session, connection_string)
-
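-    # Usage sketch (hypothetical names): construct the service and create an
-    # empty page blob; content_length must align to a 512-byte boundary.
-    #
-    #   service = PageBlobService(account_name='myaccount',
-    #                             account_key='<key>')
-    #   service.create_blob('mycontainer', 'mypageblob',
-    #                       content_length=1024 * 512)
-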
-    def create_blob(
-        self, container_name, blob_name, content_length, content_settings=None,
-        sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new Page Blob.
-
-        See create_blob_from_* for high level functions that handle the
-        creation and upload of large blobs with automatic chunking and
-        progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param int content_length:
-            Required. This header specifies the maximum size
-            for the page blob, up to 1 TB. The page blob size must be aligned
-            to a 512-byte boundary.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param int sequence_number:
-            The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the new Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-blob-type', _to_str(self.blob_type)),
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-blob-content-length', _to_str(content_length)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-blob-sequence-number', _to_str(sequence_number)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-
-        response = self._perform_request(request)
-        return _parse_base_properties(response)
-
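
As a quick orientation to this legacy (track1) API, a minimal usage sketch; the
import path, account name, and key below are illustrative assumptions, not part
of the removed source:

    from azure.multiapi.storage.v2015_04_05.blob import PageBlobService

    # Hypothetical credentials for illustration only.
    service = PageBlobService(account_name='myaccount',
                              account_key='<account-key>')

    # Reserve a 1 MiB page blob; content_length must be 512-byte aligned.
    props = service.create_blob('mycontainer', 'mypageblob',
                                content_length=1024 * 1024)
    print(props.etag)

The later sketches in this section reuse this service object.
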
-    def update_page(
-        self, container_name, blob_name, page, start_range, end_range,
-        content_md5=None, lease_id=None, if_sequence_number_lte=None,
-        if_sequence_number_lt=None, if_sequence_number_eq=None,
-        if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Updates a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes page:
-            Content of the page.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param str content_md5:
-            An MD5 hash of the page content. This hash is used to
-            verify the integrity of the page during transport. When this header
-            is specified, the storage service compares the hash of the content
-            that has arrived with the header value that was sent. If the two
-            hashes do not match, the operation will fail with error code 400
-            (Bad Request).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service
-            fails the request.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails the request.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('page', page)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'page'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('Content-MD5', _to_str(content_md5)),
-            ('x-ms-page-write', 'update'),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-if-sequence-number-le',
-             _to_str(if_sequence_number_lte)),
-            ('x-ms-if-sequence-number-lt',
-             _to_str(if_sequence_number_lt)),
-            ('x-ms-if-sequence-number-eq',
-             _to_str(if_sequence_number_eq)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-        request.body = _get_request_body_bytes_only('page', page)
-
-        response = self._perform_request(request)
-        return _parse_page_properties(response)
-
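
Continuing the sketch above, writing a single 512-byte page (range bounds are
inclusive):

    import os

    # One page of random bytes; ranges are inclusive, hence 0-511.
    page = os.urandom(512)
    service.update_page('mycontainer', 'mypageblob', page,
                        start_range=0, end_range=511)
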
-    def clear_page(
-        self, container_name, blob_name, start_range, end_range,
-        lease_id=None, if_sequence_number_lte=None,
-        if_sequence_number_lt=None, if_sequence_number_eq=None,
-        if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Clears a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to clear the pages only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service
-            fails the request.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to clear the pages only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails the request.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'page'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-page-write', 'clear'),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('x-ms-if-sequence-number-le',
-             _to_str(if_sequence_number_lte)),
-            ('x-ms-if-sequence-number-lt',
-             _to_str(if_sequence_number_lt)),
-            ('x-ms-if-sequence-number-eq',
-             _to_str(if_sequence_number_eq)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match))
-        ]
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-
-        response = self._perform_request(request)
-        return _parse_page_properties(response)
-
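
A matching sketch for clearing the same page range, reusing the service object
from the first sketch:

    # Clearing zeroes the range without uploading a body.
    service.clear_page('mycontainer', 'mypageblob',
                       start_range=0, end_range=511)
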
-    def get_page_ranges(
-        self, container_name, blob_name, snapshot=None, start_range=None,
-        end_range=None, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve information
-            from.
-        :param int start_range:
-            Start of byte range to use for getting valid page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting valid page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the offset start up
-            to the offset end.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be a multiple of
-            512 minus 1. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of valid Page Ranges for the Page Blob.
-        :rtype: list of :class:`~azure.storage.blob.models.PageRange`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'pagelist'),
-            ('snapshot', _to_str(snapshot)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        response = self._perform_request(request)
-        return _convert_xml_to_page_ranges(response)
-
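
A sketch of enumerating the populated ranges; the start/end attribute names are
assumed to follow the PageRange model this docstring references:

    # Each PageRange describes a run of pages that hold data.
    for page_range in service.get_page_ranges('mycontainer', 'mypageblob'):
        print(page_range.start, page_range.end)
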
-    def set_sequence_number(
-        self, container_name, blob_name, sequence_number_action, sequence_number=None,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Sets the blob sequence number.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`.SequenceNumberAction` for more information.
-        :param int sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('sequence_number_action', sequence_number_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-blob-sequence-number', _to_str(sequence_number)),
-            ('x-ms-sequence-number-action', _to_str(sequence_number_action)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_page_properties(response)
-
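
A sketch of the sequence-number actions referenced above; the string values are
assumed to mirror SequenceNumberAction in this SDK's models:

    # 'update' sets an explicit value; 'max' and 'increment' are the
    # other documented actions.
    service.set_sequence_number('mycontainer', 'mypageblob',
                                sequence_number_action='update',
                                sequence_number=7)
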
-    def resize_blob(
-        self, container_name, blob_name, content_length,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Resizes a page blob to the specified size. If the specified value is less
-        than the current size of the blob, then all pages above the specified value
-        are cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int content_length:
-            Size to resize the blob to, in bytes. The size must be aligned
-            to a 512-byte boundary.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(container_name, blob_name)
-        request.query = [
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-blob-content-length', _to_str(content_length)),
-            ('x-ms-lease-id', _to_str(lease_id)),
-            ('If-Modified-Since', _datetime_to_utc_string(if_modified_since)),
-            ('If-Unmodified-Since', _datetime_to_utc_string(if_unmodified_since)),
-            ('If-Match', _to_str(if_match)),
-            ('If-None-Match', _to_str(if_none_match)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_page_properties(response)
-
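
And resizing, in the same sketch; shrinking instead of growing clears any pages
beyond the new size, as the docstring notes:

    # Grow the blob to 2 MiB; the size must remain 512-byte aligned.
    service.resize_blob('mycontainer', 'mypageblob',
                        content_length=2 * 1024 * 1024)
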
-    #----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-        self, container_name, blob_name, file_path, content_settings=None,
-        metadata=None, progress_callback=None, max_connections=1,
-        max_retries=5, retry_wait=1.0, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64 MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in seconds between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                max_retries=max_retries,
-                retry_wait=retry_wait,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
-
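
A sketch of the high-level path upload; the local file name is hypothetical and
its size must already be 512-byte aligned:

    # max_connections=2 uploads chunks in parallel, per the docstring.
    service.create_blob_from_path('mycontainer', 'mypageblob',
                                  '/tmp/disk.vhd', max_connections=2)
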
-    def create_blob_from_stream(
-        self, container_name, blob_name, stream, count, content_settings=None,
-        metadata=None, progress_callback=None, max_connections=1,
-        max_retries=5, retry_wait=1.0, lease_id=None, if_modified_since=None,
-        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file/stream, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a page
-            blob cannot be created if the count is unknown.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set the blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64 MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-            Note that parallel upload requires the stream to be seekable.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in seconds between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-
-        if count < 0:
-            raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        if count % _PAGE_ALIGNMENT != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
-
-        response = self.create_blob(
-            container_name=container_name,
-            blob_name=blob_name,
-            content_length=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_PAGE_SIZE,
-            stream=stream,
-            max_connections=max_connections,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            progress_callback=progress_callback,
-            lease_id=lease_id,
-            uploader_class=_PageBlobChunkUploader,
-            if_match=response.etag,
-            timeout=timeout
-        )
-
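
The stream variant in the same sketch style; unlike block blobs, count must be
known up front and page-aligned:

    from io import BytesIO

    data = BytesIO(b'\x00' * 1024)   # two zeroed pages
    service.create_blob_from_stream('mycontainer', 'mypageblob',
                                    data, count=1024)
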
-    def create_blob_from_bytes(
-        self, container_name, blob_name, blob, index=0, count=None,
-        content_settings=None, metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0,
-        lease_id=None, if_modified_since=None, if_unmodified_since=None,
-        if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the byte array.
-        :param int count:
-            Number of bytes to upload. Set to None or a negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: callback function with signature func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size
-            exceeds 64 MB.
-            Set to 1 to upload the blob chunks sequentially.
-            Set to 2 or more to upload the blob chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of blob chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in seconds between retries.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            max_retries=max_retries,
-            retry_wait=retry_wait,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
\ No newline at end of file
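
Closing out the convenience APIs, a bytes upload in the same sketch; index and
count default to covering the whole payload:

    # 512 zero bytes = exactly one page.
    service.create_blob_from_bytes('mycontainer', 'mypageblob',
                                   b'\x00' * 512)
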
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/cloudstorageaccount.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/cloudstorageaccount.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/cloudstorageaccount.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/cloudstorageaccount.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,190 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-# Note that we import BlobService/QueueService/TableService on demand
-# because this module is imported by azure/storage/__init__
-# i.e. we don't want 'import azure.storage' to trigger an automatic import
-# of blob/queue/table packages.
-
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from .models import (
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-)
-from ._error import _validate_not_none
-
-class CloudStorageAccount(object):
-    """
-    Provides a factory for creating the blob, queue, table, and file services
-    with a common account name and account key or sas token. Users can either
-    use the factory or construct the appropriate service directly.
-    """
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will
-            override the other parameters.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.is_emulated = is_emulated
-
-    def create_block_blob_service(self):
-        '''
-        Creates a BlockBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
-        '''
-        from .blob.blockblobservice import BlockBlobService
-        return BlockBlobService(self.account_name, self.account_key, 
-                                sas_token=self.sas_token,
-                                is_emulated=self.is_emulated)
-
-    def create_page_blob_service(self):
-        '''
-        Creates a PageBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
-        '''
-        from .blob.pageblobservice import PageBlobService
-        return PageBlobService(self.account_name, self.account_key,
-                               sas_token=self.sas_token,
-                               is_emulated=self.is_emulated)
-
-    def create_append_blob_service(self):
-        '''
-        Creates an AppendBlobService object with the settings specified in the
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
-        '''
-        from .blob.appendblobservice import AppendBlobService
-        return AppendBlobService(self.account_name, self.account_key,
-                                 sas_token=self.sas_token,
-                                 is_emulated=self.is_emulated)
-
-    def create_table_service(self):
-        '''
-        Creates a TableService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.table.tableservice.TableService`
-        '''
-        from .table.tableservice import TableService
-        return TableService(self.account_name, self.account_key,
-                            sas_token=self.sas_token,
-                            is_emulated=self.is_emulated)
-
-    def create_queue_service(self):
-        '''
-        Creates a QueueService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
-        '''
-        from .queue.queueservice import QueueService
-        return QueueService(self.account_name, self.account_key,
-                            sas_token=self.sas_token,
-                            is_emulated=self.is_emulated)
-
-    def create_file_service(self):
-        '''
-        Creates a FileService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.file.fileservice.FileService`
-        '''
-        from .file.fileservice import FileService
-        return FileService(self.account_name, self.account_key,
-                           sas_token=self.sas_token)
-
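
To illustrate the factory pattern these methods implement, a minimal sketch;
the import path and credentials are assumptions for illustration:

    from azure.multiapi.storage.v2015_04_05 import CloudStorageAccount

    account = CloudStorageAccount('myaccount', '<account-key>')

    # Each factory forwards the shared credentials to the service it builds.
    blob_service = account.create_block_blob_service()
    queue_service = account.create_queue_service()
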
-    def generate_shared_access_signature(self, services, resource_types, 
-                                         permission, expiry, start=None, 
-                                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(services, resource_types, permission, 
-                                    expiry, start=start, ip=ip, protocol=protocol)
\ No newline at end of file
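
A sketch of generating an account SAS with this method, reusing the account
object from the previous sketch; the flag-style constructors and the models
import path are assumptions based on the classes this docstring references:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2015_04_05.models import (
        AccountPermissions, ResourceTypes, Services,
    )

    # Read-only access to blob objects, valid for one hour.
    sas_token = account.generate_shared_access_signature(
        Services(blob=True),
        ResourceTypes(object=True),
        AccountPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
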
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,38 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-from .._constants import (
-    __author__,
-    __version__,
-    X_MS_VERSION,
-)
-
-from ..models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-)
-
-from ..cloudstorageaccount import CloudStorageAccount
-from ..sharedaccesssignature import (
-    SharedAccessSignature,
-)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/_error.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-from .._error import *
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/common/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/common/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-from ..models import *
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,29 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .models import (
-    Share,
-    ShareProperties,
-    File,
-    FileProperties,
-    Directory,
-    DirectoryProperties,
-    FileRange,
-    ContentSettings,
-    CopyProperties,
-    SharePermissions,
-    FilePermissions,
-)
-
-from .fileservice import FileService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_chunking.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,266 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import threading
-
-from time import sleep
-from .._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-
-class _FileChunkDownloader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name, 
-                 file_size, chunk_size, start_range, end_range, stream,
-                 max_retries, retry_wait, progress_callback, timeout):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.chunk_size = chunk_size
-        if start_range is not None:
-            end_range = end_range or file_size
-            self.file_size = end_range - start_range
-            self.file_end = end_range
-            self.start_index = start_range
-        else:
-            self.file_size = file_size
-            self.file_end = file_size
-            self.start_index = 0
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock()
-        self.max_retries = max_retries
-        self.retry_wait = retry_wait
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.file_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.file_end:
-            chunk_end = self.file_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk_with_retries(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.file_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + chunk_start)
-            self.stream.write(chunk_data)
-
-    def _download_chunk_with_retries(self, chunk_start, chunk_end):
-        retries = self.max_retries
-        while True:
-            try:
-                return self.file_service._get_file(
-                    self.share_name,
-                    self.directory_name,
-                    self.file_name,
-                    start_range=chunk_start,
-                    end_range=chunk_end - 1,
-                    timeout=self.timeout
-                )
-            except Exception:
-                if retries > 0:
-                    retries -= 1
-                    sleep(self.retry_wait)
-                else:
-                    raise
-
-
-class _FileChunkUploader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name, 
-                 file_size, chunk_size, stream, parallel, max_retries, retry_wait,
-                 progress_callback, timeout):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.file_size = file_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock() if parallel else None
-        self.max_retries = max_retries
-        self.retry_wait = retry_wait
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = 0
-        if self.file_size is None:
-            # we don't know the size of the stream, so we have no
-            # choice but to seek
-            while True:
-                data = self._read_from_stream(index, 1)
-                if not data:
-                    break
-                yield index
-                index += self.chunk_size
-        else:
-            while index < self.file_size:
-                yield index
-                index += self.chunk_size
-
-    def process_chunk(self, chunk_offset):
-        size = self.chunk_size
-        if self.file_size is not None:
-            size = min(size, self.file_size - chunk_offset)
-        chunk_data = self._read_from_stream(chunk_offset, size)
-        return self._upload_chunk_with_retries(chunk_offset, chunk_data)
-
-    def process_all_unknown_size(self):
-        assert self.stream_lock is None
-        range_ids = []
-        index = 0
-        while True:
-            data = self._read_from_stream(None, self.chunk_size)
-            if data:
-                # Upload at the current offset before advancing it; bumping
-                # index first would shift every uploaded range by one chunk.
-                range_id = self._upload_chunk_with_retries(index, data)
-                index += len(data)
-                range_ids.append(range_id)
-            else:
-                break
-
-        return range_ids
-
-    def _read_from_stream(self, offset, count):
-        if self.stream_lock is not None:
-            with self.stream_lock:
-                self.stream.seek(self.stream_start + offset)
-                data = self.stream.read(count)
-        else:
-            data = self.stream.read(count)
-        return data
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.file_size)
-
-    def _upload_chunk_with_retries(self, chunk_start, chunk_data):
-        retries = self.max_retries
-        while True:
-            try:
-                range_id = self._upload_chunk(chunk_start, chunk_data) 
-                self._update_progress(len(chunk_data))
-                return range_id
-            except Exception:
-                if retries > 0:
-                    retries -= 1
-                    sleep(self.retry_wait)
-                else:
-                    raise
-
-    def _upload_chunk(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        self.file_service.update_range(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            timeout=self.timeout
-        )
-        return 'bytes={0}-{1}'.format(chunk_start, chunk_end)
-
-
-def _download_file_chunks(file_service, share_name, directory_name, file_name,
-                          file_size, block_size, start_range, end_range, stream,
-                          max_connections, max_retries, retry_wait, progress_callback,
-                          timeout):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('file'))
-
-    downloader = _FileChunkDownloader(
-        file_service,
-        share_name,
-        directory_name, 
-        file_name,
-        file_size,
-        block_size,
-        start_range,
-        end_range,
-        stream,
-        max_retries,
-        retry_wait,
-        progress_callback,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, file_size)
-
-    import concurrent.futures
-    executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-    # Materialize the iterator so every chunk download completes (and any
-    # worker exception propagates) before returning.
-    list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-def _upload_file_chunks(file_service, share_name, directory_name, file_name,
-                        file_size, block_size, stream, max_connections,
-                        max_retries, retry_wait, progress_callback, timeout):
-    uploader = _FileChunkUploader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        file_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        max_retries,
-        retry_wait,
-        progress_callback,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, file_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
-    else:
-        if file_size is not None:
-            range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
-        else:
-            range_ids = uploader.process_all_unknown_size()
-
-    return range_ids
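The two classes and two helpers above implement one pattern: split the transfer into fixed-size ranges, fan the ranges out over a thread pool, and retry each range independently. A minimal, self-contained sketch of that pattern (fetch_range and all sizes are hypothetical stand-ins, not names from this package):

import concurrent.futures
import time

def download_in_chunks(fetch_range, total_size, chunk_size=4 * 1024 * 1024,
                       max_connections=4, max_retries=3, retry_wait=1.0):
    # One start offset per chunk, as in _FileChunkDownloader.get_chunk_offsets().
    offsets = range(0, total_size, chunk_size)

    def fetch_with_retries(start):
        end = min(start + chunk_size, total_size) - 1  # inclusive end byte
        for attempt in range(max_retries + 1):
            try:
                return start, fetch_range(start, end)
            except Exception:
                if attempt == max_retries:
                    raise
                time.sleep(retry_wait)

    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
        # Materializing the map waits for every chunk and surfaces errors.
        return dict(executor.map(fetch_with_retries, offsets))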
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,213 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from dateutil import parser
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .models import (
-    Share,
-    Directory,
-    File,
-    FileProperties,
-    FileRange,
-    ShareProperties,
-    DirectoryProperties,
-)
-from ..models import (
-    _list,
-)
-from .._deserialization import (
-    _parse_properties,
-    _parse_metadata,
-)
-
-def _parse_share(name, response):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ShareProperties)
-    return Share(name, props, metadata)
-
-def _parse_directory(name, response):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, DirectoryProperties)
-    return Directory(name, props, metadata)
-
-def _parse_file(name, response):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, FileProperties)
-    return File(name, response.body, props, metadata)
-
-def _convert_xml_to_shares(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults AccountName="https://myaccount.file.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Shares>
-        <Share>
-          <Name>share-name</Name>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Quota>max-share-size</Quota>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Share>
-      </Shares>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    shares = _list()
-    list_element = ETree.fromstring(response.body)
-    
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(shares, 'next_marker', next_marker)
-
-    shares_element = list_element.find('Shares')
-
-    for share_element in shares_element.findall('Share'):
-        # Name element
-        share = Share()
-        share.name = share_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = share_element.find('Metadata')
-        if metadata_root_element is not None:
-            share.metadata = dict()
-            for metadata_element in metadata_root_element:
-                share.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = share_element.find('Properties')
-        share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        share.properties.etag = properties_element.findtext('Etag')
-        share.properties.quota = int(properties_element.findtext('Quota'))
-        
-        # Add share to list
-        shares.append(share)
-
-    return shares
-
-def _convert_xml_to_directories_and_files(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Entries>
-        <File>
-          <Name>file-name</Name>
-          <Properties>
-            <Content-Length>size-in-bytes</Content-Length>
-          </Properties>
-        </File>
-        <Directory>
-          <Name>directory-name</Name>
-        </Directory>
-      </Entries>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    entries = _list()
-    list_element = ETree.fromstring(response.body)
-    
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(entries, 'next_marker', next_marker)
-
-    entries_element = list_element.find('Entries')
-
-    for file_element in entries_element.findall('File'):
-        # Name element
-        file = File()
-        file.name = file_element.findtext('Name')
-
-        # Properties
-        properties_element = file_element.find('Properties')
-        file.properties.content_length = int(properties_element.findtext('Content-Length'))
-        
-        # Add file to list
-        entries.append(file)
-
-    for directory_element in entries_element.findall('Directory'):
-        # Name element
-        directory = Directory()
-        directory.name = directory_element.findtext('Name')
-        
-        # Add directory to list
-        entries.append(directory)
-
-    return entries
-
-def _convert_xml_to_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <Ranges>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-    </Ranges>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    ranges = list()
-    ranges_element = ETree.fromstring(response.body)
-
-    for range_element in ranges_element.findall('Range'):
-        # Parse range
-        range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
-        
-        # Add range to list
-        ranges.append(range)
-
-    return ranges
-
-def _convert_xml_to_share_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <ShareStats>
-       <ShareUsage>15</ShareUsage>
-    </ShareStats>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    share_stats_element = ETree.fromstring(response.body)
-    return int(share_stats_element.findtext('ShareUsage'))
\ No newline at end of file
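Each _convert_xml_to_* function above follows the same ElementTree recipe: parse the response body, walk the elements shown in the docstring's sample payload, and coerce text nodes to the right types. A runnable sketch against a toy Shares listing:

from xml.etree import ElementTree as ETree

body = b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults AccountName="https://myaccount.file.core.windows.net">
  <Shares>
    <Share>
      <Name>myshare</Name>
      <Properties><Quota>1024</Quota></Properties>
    </Share>
  </Shares>
  <NextMarker />
</EnumerationResults>'''

root = ETree.fromstring(body)
next_marker = root.findtext('NextMarker') or None  # empty element becomes None
for share in root.find('Shares').findall('Share'):
    name = share.findtext('Name')
    quota = int(share.find('Properties').findtext('Quota'))
    print(name, quota, next_marker)  # myshare 1024 None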
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_serialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,71 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from time import time
-from wsgiref.handlers import format_date_time
-from .._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-from .._common_conversion import _str
-
-def _get_path(share_name=None, directory_name=None, file_name=None):
-    '''
-    Creates the path to access a file resource.
-
-    share_name:
-        Name of share.
-    directory_name:
-        The path to the directory.
-    file_name:
-        Name of file.
-    '''
-    if share_name and directory_name and file_name:
-        return '/{0}/{1}/{2}'.format(
-            _str(share_name),
-            _str(directory_name),
-            _str(file_name))
-    elif share_name and directory_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(directory_name))
-    elif share_name and file_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(file_name))
-    elif share_name:
-        return '/{0}'.format(_str(share_name))
-    else:
-        return '/'
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, end_range_required=True, check_content_md5=False):
-    request.headers = request.headers or []
-    if start_range_required:
-        _validate_not_none('start_range', start_range)
-    if end_range_required or end_range is not None:
-        _validate_not_none('end_range', end_range)
-        request.headers.append(('x-ms-range', "bytes={0}-{1}".format(start_range, end_range)))
-    else:
-        request.headers.append(('x-ms-range', "bytes={0}-".format(start_range)))
-
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers.append(('x-ms-range-get-content-md5', 'true'))
\ No newline at end of file
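The header logic above reduces to two shapes of the x-ms-range value, plus a 4 MiB cap when an MD5 of the range is requested. A sketch of just the formatting rule:

def format_range(start_range, end_range=None):
    # Closed range: 'bytes=0-1023'; open-ended range: 'bytes=1024-'.
    if end_range is not None:
        return 'bytes={0}-{1}'.format(start_range, end_range)
    return 'bytes={0}-'.format(start_range)

assert format_range(0, 1023) == 'bytes=0-1023'
assert format_range(1024) == 'bytes=1024-'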
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/fileservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/fileservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/fileservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/fileservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2210 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from azure.common import AzureHttpError
-from .._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_STORAGE_MISSING_INFO,
-    _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-)
-from .._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from .._serialization import (
-    _get_request_body,
-    _get_request_body_bytes_only,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from .._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _get_download_size,
-    _parse_metadata,
-    _parse_properties,
-)
-from ..models import (
-    Services,
-    ListGenerator,
-)
-from .models import (
-    File,
-    FileProperties,
-)
-from .._http import HTTPRequest
-from ._chunking import (
-    _download_file_chunks,
-    _upload_file_chunks,
-)
-from .._auth import (
-    _StorageSharedKeyAuthentication,
-    _StorageSASAuthentication,
-)
-from .._connection import _ServiceParameters
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_shares,
-    _convert_xml_to_directories_and_files,
-    _convert_xml_to_ranges,
-    _convert_xml_to_share_stats,
-    _parse_file,
-    _parse_share,
-    _parse_directory,
-)
-from ..sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from ..storageclient import StorageClient
-from os import path
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-class FileService(StorageClient):
-
-    '''
-    The Server Message Block (SMB) protocol is the preferred file share protocol
-    used on-premises today. The Microsoft Azure File service enables customers to
-    leverage the availability and scalability of Azure’s Cloud Infrastructure as
-    a Service (IaaS) SMB offering without having to rewrite SMB client applications.
-
-    The Azure File service also offers a compelling alternative to traditional
-    Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which
-    are often complex and expensive to install, configure, and operate. 
-    '''
-    MAX_SINGLE_GET_SIZE = 64 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
-    MAX_RANGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'file',
-            account_name=account_name, 
-            account_key=account_key, 
-            sas_token=sas_token, 
-            protocol=protocol, 
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string)
-            
-        super(FileService, self).__init__(service_params)
-
-        if self.account_name == DEV_ACCOUNT_NAME:
-            raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-    def make_file_url(self, share_name, directory_name, file_name, 
-                      protocol=None, sas_token=None):
-        '''
-        Creates the url to access a file.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when FileService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: file access URL.
-        :rtype: str
-        '''
-
-        if directory_name is None:
-            url = '{}://{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                file_name,
-            )
-        else:
-            url = '{}://{}/{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                directory_name,
-                file_name,
-            )
-
-        if sas_token:
-            url += '?' + sas_token
-
-        return url
-
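The two URL shapes built above, and the SAS append, can be exercised without a service call; a sketch with made-up values:

endpoint = 'myaccount.file.core.windows.net'  # hypothetical primary endpoint
url = 'https://{0}/{1}/{2}/{3}'.format(endpoint, 'myshare', 'mydir', 'f.txt')
print(url)  # https://myaccount.file.core.windows.net/myshare/mydir/f.txt
print(url + '?' + 'sv=2015-04-05&sig=...')  # placeholder SAS token appended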
-    def generate_account_shared_access_signature(self, resource_types, permission, 
-                                        expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the file service.
-        Use the returned signature with the sas_token parameter of the FileService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.FILE, resource_types, permission, 
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
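A hedged usage sketch for the account SAS flow: it assumes the 1.4.0 import layout shown in this diff, that ResourceTypes and AccountPermissions live in the version's root models module (as the relative imports above suggest), and uses dummy credentials:

from datetime import datetime, timedelta
from azure.multiapi.storage.v2015_04_05 import models
from azure.multiapi.storage.v2015_04_05.file import FileService

service = FileService(account_name='myaccount', account_key='bXlrZXk=')  # dummy key
token = service.generate_account_shared_access_signature(
    models.ResourceTypes(object=True),
    models.AccountPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1))
# A key-less client authenticated only by the token:
read_only = FileService(account_name='myaccount', sas_token=token)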
-    def generate_share_shared_access_signature(self, share_name, 
-                                         permission=None, 
-                                         expiry=None,
-                                         start=None, 
-                                         id=None,
-                                         ip=None,
-                                         protocol=None,
-                                         cache_control=None,
-                                         content_disposition=None,
-                                         content_encoding=None,
-                                         content_language=None,
-                                         content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_share(
-            share_name,
-            permission, 
-            expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def generate_file_shared_access_signature(self, share_name, 
-                                         directory_name=None, 
-                                         file_name=None,
-                                         permission=None, 
-                                         expiry=None,
-                                         start=None, 
-                                         id=None,
-                                         ip=None,
-                                         protocol=None,
-                                         cache_control=None,
-                                         content_disposition=None,
-                                         content_encoding=None,
-                                         content_language=None,
-                                         content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so 
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_file(
-            share_name,
-            directory_name,
-            file_name,
-            permission, 
-            expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def set_file_service_properties(self, hour_metrics=None, minute_metrics=None, 
-                                    cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's File service, including
-        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for files.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for files.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list of :class:`~azure.storage.models.CorsRule`
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),         
-        ]
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
-
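Every operation in this client assembles the same HTTPRequest shape: verb, host, resource path, and a query list whose None-valued pairs are dropped during serialization. A toy illustration of that query convention (the helper name is illustrative only):

def build_query_string(query):
    # None values are omitted, so optional parameters such as timeout
    # can always be appended unconditionally.
    return '&'.join('{0}={1}'.format(k, v) for k, v in query if v is not None)

q = [('restype', 'share'), ('comp', 'metadata'), ('timeout', None)]
assert build_query_string(q) == 'restype=share&comp=metadata'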
-    def get_file_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's File service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The file service properties.
-        :rtype:
-            :class:`~azure.storage.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),         
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_service_properties(response.body)
-
-    def list_shares(self, prefix=None, marker=None, num_results=None, 
-                    include_metadata=False, timeout=None):
-        '''
-        Returns a generator to list the shares under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned or num_results 
-        is reached.
-
-        If num_results is specified and the account has more than that number of 
-        shares, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of shares to return.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        include = 'metadata' if include_metadata else None
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, 
-                'include': include, 'timeout': timeout}
-        resp = self._list_shares(**kwargs)
-
-        return ListGenerator(resp, self._list_shares, (), kwargs)
-
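The ListGenerator returned above lazily follows continuation tokens; the paging loop it encapsulates distills to the following (fetch_page is a hypothetical callable returning (items, next_marker)):

def iter_all(fetch_page):
    marker = None
    while True:
        items, marker = fetch_page(marker)
        for item in items:
            yield item
        if not marker:  # the service omits the marker on the last page
            break

# Toy pages: one continuation token, then a final page.
pages = {None: (['a', 'b'], 'tok'), 'tok': (['c'], None)}
assert list(iter_all(pages.get)) == ['a', 'b', 'c']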
-    def _list_shares(self, prefix=None, marker=None, max_results=None, 
-                     include=None, timeout=None):
-        '''
-        Returns a list of the shares under the specified account.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of shares to return. A single list
-            request may return up to 1000 shares and potentially a continuation
-            token which should be followed to get additional results.
-        :param string include:
-            Include this parameter to specify that the share's
-            metadata be returned as part of the response body. Set this
-            parameter to the string 'metadata' to get the share's metadata.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('comp', 'list'),
-            ('prefix', _to_str(prefix)),
-            ('marker', _to_str(marker)),
-            ('maxresults', _int_to_str(max_results)),
-            ('include', _to_str(include)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_shares(response)
-
-    def create_share(self, share_name, metadata=None, quota=None,
-                     fail_on_exist=False, timeout=None):
-        '''
-        Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of share to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :type metadata: dict of str to str
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the share exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if share is created, False if share already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-share-quota', _int_to_str(quota))]
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
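The try/except dance above recurs in every create/delete method of this client; distilled, with a stand-in error type:

class ResourceExistsError(Exception):
    '''Stand-in for the service's "already exists" AzureHttpError.'''

def create_if_not_exists(perform_request, fail_on_exist=False):
    if fail_on_exist:
        perform_request()  # let any service error propagate
        return True
    try:
        perform_request()
        return True
    except ResourceExistsError:  # swallowed unless the caller opts in
        return False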
-    def get_share_properties(self, share_name, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A Share that exposes properties and metadata.
-        :rtype: :class:`.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_share(share_name, response)
-
-    def set_share_properties(self, share_name, quota, timeout=None):
-        '''
-        Sets service-defined properties for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('quota', quota)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-share-quota', _int_to_str(quota))]
-
-        self._perform_request(request)
-
-    def get_share_metadata(self, share_name, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the share metadata name, value pairs.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_metadata(response)
-
-    def set_share_metadata(self, share_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        share. Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param metadata:
-            A dict containing name-value pairs to associate with the share as 
-            metadata. Example: {'category':'test'}
-        :type metadata: a dict mapping str to str
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-
-        self._perform_request(request)
-
-    def get_share_acl(self, share_name, timeout=None):
-        '''
-        Gets the permissions for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the share.
-        :rtype: dict of str to :class:`.AccessPolicy`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_signed_identifiers(response.body)
-
-    def set_share_acl(self, share_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets the permissions for the specified share or stored access 
-        policies that may be used with Shared Access Signatures.
-
-        :param str share_name:
-            Name of existing share.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict of str to :class:`.AccessPolicy`
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
-
-    def get_share_stats(self, share_name, timeout=None):
-        '''
-        Gets the approximate size of the data stored on the share,
-        rounded up to the nearest gigabyte.
-        
-        Note that this value may not include all recently created
-        or recently resized files.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the approximate size of the data stored on the share.
-        :rtype: int
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('comp', 'stats'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_share_stats(response)
-
-    def delete_share(self, share_name, fail_not_exist=False, timeout=None):
-        '''
-        Marks the specified share for deletion. If the share
-        does not exist, the operation fails on the service. By 
-        default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of share to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the share doesn't
-            exist. False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if share is deleted, False if the share doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(share_name)
-        request.query = [
-            ('restype', 'share'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def create_directory(self, share_name, directory_name, metadata=None,
-                         fail_on_exist=False, timeout=None):
-        '''
-        Creates a new directory under the specified share or parent directory.
-        If a directory with the same name already exists, the operation fails
-        on the service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to create, including the path to the parent 
-            directory.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            directory as metadata. Example: {'Category': 'test'}
-        :type metadata: dict of str to str
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the directory exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is created, False if directory already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
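-    # Illustrative sketch (names assumed); note that the full path to the
-    # parent directory is passed as part of directory_name:
-    #
-    #     svc.create_directory('myshare', 'logs', metadata={'Category': 'test'})
-    #     svc.create_directory('myshare', 'logs/2016')  # parent 'logs' must exist
-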
-    def delete_directory(self, share_name, directory_name,
-                         fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified empty directory. Note that the directory must
-        be empty before it can be deleted. Attempting to delete directories 
-        that are not empty will fail.
-
-        If the directory does not exist, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to delete, including the path to the parent 
-            directory.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the directory doesn't
-            exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is deleted, False otherwise.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_directory_properties(self, share_name, directory_name, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-           The path to an existing directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: properties for the specified directory within a directory object.
-        :rtype: :class:`~azure.storage.file.models.Directory`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_directory(directory_name, response)
-
-    def get_directory_metadata(self, share_name, directory_name, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified directory.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the directory metadata name-value pairs.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_metadata(response)
-
-    def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        directory. Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param metadata:
-            A dict containing name-value pairs to associate with the directory
-            as metadata. Example: {'category':'test'}
-        :type metadata: A dict mapping str to str.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-
-        self._perform_request(request)
-
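-    # Sketch of a metadata round trip (names assumed); each set call replaces,
-    # rather than merges, any metadata already on the directory:
-    #
-    #     svc.set_directory_metadata('myshare', 'logs', {'category': 'test'})
-    #     svc.get_directory_metadata('myshare', 'logs')  # -> {'category': 'test'}
-    #     svc.set_directory_metadata('myshare', 'logs')  # clears all metadata
-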
-    def list_directories_and_files(self, share_name, directory_name=None, 
-                                   num_results=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the directories and files under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all directories and files have been returned or
-        num_results is reached.
-
-        If num_results is specified and the share has more than that number of
-        files and directories, the generator will have a populated next_marker
-        field once it
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int num_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        args = (share_name, directory_name)
-        kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout}
-        resp = self._list_directories_and_files(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
-
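-    # Sketch of lazy enumeration and manual continuation (names assumed):
-    #
-    #     for item in svc.list_directories_and_files('myshare', 'logs'):
-    #         print(item.name)
-    #
-    #     # With num_results set, a finished generator exposes next_marker,
-    #     # which can seed a fresh generator for the remaining results:
-    #     page = svc.list_directories_and_files('myshare', 'logs', num_results=100)
-    #     names = [item.name for item in page]
-    #     rest = svc.list_directories_and_files(
-    #         'myshare', 'logs', num_results=100, marker=page.next_marker)
-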
-    def _list_directories_and_files(self, share_name, directory_name=None, 
-                                   marker=None, max_results=None, timeout=None):
-        '''
-        Returns a list of the directories and files under the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name)
-        request.query = [
-            ('restype', 'directory'),
-            ('comp', 'list'),
-            ('marker', _to_str(marker)),
-            ('maxresults', _int_to_str(max_results)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_directories_and_files(response)
-
-    def get_file_properties(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. Returns an instance of :class:`.File` with
-        :class:`.FileProperties` and a metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: a file object including properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-
-        response = self._perform_request(request)
-        return _parse_file(file_name, response)
-
-    def exists(self, share_name, directory_name=None, file_name=None, timeout=None):
-        '''
-        Returns a boolean indicating whether the share exists if only the share
-        name is given. If directory_name is specified, a boolean will be returned
-        indicating if the directory exists. If file_name is specified as well, a
-        boolean will be returned indicating if the file exists.
-
-        :param str share_name:
-            Name of a share.
-        :param str directory_name:
-            The path to a directory.
-        :param str file_name:
-            Name of a file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        try:
-            if file_name is not None:
-                self.get_file_properties(share_name, directory_name, file_name, timeout=timeout)
-            elif directory_name is not None:
-                self.get_directory_properties(share_name, directory_name, timeout=timeout)
-            else:
-                self.get_share_properties(share_name, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
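-    # Sketch showing how the optional arguments select which resource is
-    # checked (names assumed):
-    #
-    #     svc.exists('myshare')                     # does the share exist?
-    #     svc.exists('myshare', 'logs')             # does the directory exist?
-    #     svc.exists('myshare', 'logs', 'app.log')  # does the file exist?
-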
-    def resize_file(self, share_name, directory_name, 
-                    file_name, content_length, timeout=None):
-        '''
-        Resizes a file to the specified size. If the specified byte
-        value is less than the current size of the file, then all
-        ranges above the specified byte value are cleared.
-        
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int content_length:
-            The length to resize the file to.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-content-length', _to_str(content_length))]
-
-        self._perform_request(request)
-
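-    # Sketch (names assumed): shrinking clears ranges beyond the new size, so
-    # truncating to 1 KB discards everything past offset 1023:
-    #
-    #     svc.resize_file('myshare', 'logs', 'app.log', content_length=1024)
-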
-    def set_file_properties(self, share_name, directory_name, file_name, 
-                            content_settings, timeout=None):
-        '''
-        Sets system properties on the file. If one property is set in
-        content_settings, all properties will be overridden.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set the file properties.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_settings', content_settings)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = content_settings._to_headers()
-
-        self._perform_request(request)
-
-    def get_file_metadata(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the file metadata name-value pairs.
-        :rtype: dict mapping str to str.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _parse_metadata(response)
-
-    def set_file_metadata(self, share_name, directory_name, 
-                          file_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the file. To remove all
-            metadata from the file, call this operation with no metadata headers.
-        :type metadata: dict mapping str to str
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-
-        self._perform_request(request)
-
-    def copy_file(self, share_name, directory_name, file_name, copy_source,
-                  metadata=None, timeout=None):
-        '''
-        Copies a blob or file to a destination file within the storage account. 
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str copy_source:
-            Specifies the URL of the source blob or file, up to 2 KB in length. 
-            A source file in the same account can be private, but a file in another account
-            must be public or accept credentials included in this URL, such as
-            a Shared Access Signature. Example:
-            https://myaccount.file.core.windows.net/myshare/mydirectory/myfile
-        :param metadata:
-            Dict containing name-value pairs.
-        :type metadata: A dict mapping str to str.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.file.models.CopyProperties`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_source', copy_source)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-copy-source', _to_str(copy_source)),
-            ('x-ms-meta-name-values', metadata),
-        ]
-
-        response = self._perform_request(request)
-        props = _parse_properties(response, FileProperties)
-        return props.copy
-
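-    # Sketch of starting a copy and cancelling it via the returned copy id
-    # (account URL and names are assumptions):
-    #
-    #     source = 'https://myaccount.file.core.windows.net/myshare/mydir/src.txt'
-    #     copy = svc.copy_file('myshare', 'mydir', 'dst.txt', source)
-    #     if copy.status != 'success':
-    #         svc.abort_copy_file('myshare', 'mydir', 'dst.txt', copy.id)
-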
-    def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
-        '''
-        Aborts a pending copy_file operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param str share_name:
-            Name of destination share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of destination file.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_file operation.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'copy'),
-            ('copyid', _to_str(copy_id)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('x-ms-copy-action', 'abort'),
-        ]
-
-        self._perform_request(request)
-
-    def delete_file(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-
-        self._perform_request(request)
-
-    def create_file(self, share_name, directory_name, file_name,
-                    content_length, content_settings=None, metadata=None, 
-                    timeout=None):
-        '''
-        Creates a new file.
-
-        See create_file_from_* for high level functions that handle the
-        creation and upload of large files with automatic chunking and
-        progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param int content_length:
-            Length of the file in bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: a dict mapping str to str
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [
-            ('x-ms-meta-name-values', metadata),
-            ('x-ms-content-length', _to_str(content_length)),
-            ('x-ms-type', 'file')
-        ]
-        if content_settings is not None:
-            request.headers += content_settings._to_headers()
-
-        self._perform_request(request)
-
-    def create_file_from_path(self, share_name, directory_name, file_name, 
-                           local_file_path, content_settings=None,
-                           metadata=None, progress_callback=None,
-                           max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
-        '''
-        Creates a new Azure file from a local file path, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str local_file_path:
-            Path of the local file to upload as the file content.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used for setting file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the file size
-            exceeds 64MB.
-            Set to 1 to upload the file chunks sequentially.
-            Set to 2 or more to upload the file chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('local_file_path', local_file_path)
-
-        count = path.getsize(local_file_path)
-        with open(local_file_path, 'rb') as stream:
-            self.create_file_from_stream(
-                share_name, directory_name, file_name, stream,
-                count, content_settings, metadata, progress_callback,
-                max_connections, max_retries, retry_wait, timeout)
-
-    def create_file_from_text(self, share_name, directory_name, file_name, 
-                           text, encoding='utf-8', content_settings=None,
-                           metadata=None, timeout=None):
-        '''
-        Creates a new file from str/unicode, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str text:
-            Text to upload to the file.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: a dict mapping str to str
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.create_file_from_bytes(
-            share_name, directory_name, file_name, text, 0,
-            len(text), content_settings, metadata, timeout)
-
-    def create_file_from_bytes(
-        self, share_name, directory_name, file_name, file,
-        index=0, count=None, content_settings=None, metadata=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, timeout=None):
-        '''
-        Creates a new file from an array of bytes, or updates the content
-        of an existing file, with automatic chunking and progress
-        notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param bytes file:
-            Content of file as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or a negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the file size
-            exceeds 64MB.
-            Set to 1 to upload the file chunks sequentially.
-            Set to 2 or more to upload the file chunks in parallel. This uses
-            more system resources but will upload faster.
-        :param int max_retries:
-            Number of times to retry upload of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file', file)
-        _validate_type_bytes('file', file)
-
-        if index < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(file) - index
-
-        stream = BytesIO(file)
-        stream.seek(index)
-
-        self.create_file_from_stream(
-            share_name, directory_name, file_name, stream, count,
-            content_settings, metadata, progress_callback,
-            max_connections, max_retries, retry_wait, timeout)
-
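-    # Sketch of uploading a slice of an in-memory buffer (names assumed);
-    # index=2 and count=6 upload bytes 2..7 as the whole file content:
-    #
-    #     data = b'0123456789'
-    #     svc.create_file_from_bytes('myshare', 'mydir', 'slice.bin', data,
-    #                                index=2, count=6)
-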
-    def create_file_from_stream(
-        self, share_name, directory_name, file_name, stream, count,
-        content_settings=None, metadata=None, progress_callback=None,
-        max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
-        '''
-        Creates a new file from a file/stream, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the file content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a
-            file cannot be created if the count is unknown.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: a dict mapping str to str
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the file size
-            exceeds 64MB.
-            Set to 1 to upload the file chunks sequentially.
-            Set to 2 or more to upload the file chunks in parallel. This uses
-            more system resources but will upload faster.
-            Note that parallel upload requires the stream to be seekable.
-        :param int max_retries:
-            Number of times to retry upload of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-
-        if count < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        self.create_file(
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            content_settings,
-            metadata,
-            timeout
-        )
-
-        _upload_file_chunks(
-            self,
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            self.MAX_RANGE_SIZE,
-            stream,
-            max_connections,
-            max_retries,
-            retry_wait,
-            progress_callback,
-            timeout
-        )
-
-    def _get_file(self, share_name, directory_name, file_name,
-                 start_range=None, end_range=None,
-                 range_get_content_md5=None, timeout=None):
-        '''
-        Downloads a file's content, metadata, and properties. You can specify a
-        range if you don't need to download the file in its entirety. If no range
-        is specified, the full file will be downloaded.
-
-        See get_file_to_* for high level functions that handle the download
-        of large files with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool range_get_content_md5:
-            When this parameter is set to True and specified together
-            with a byte range, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A File with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=range_get_content_md5)
-
-        response = self._perform_request(request, None)
-        return _parse_file(file_name, response)
-
-    def get_file_to_path(self, share_name, directory_name, file_name, file_path,
-                         open_mode='wb', start_range=None, end_range=None,
-                         range_get_content_md5=None, progress_callback=None,
-                         max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
-        '''
-        Downloads a file to a file path, with automatic chunking and progress
-        notifications. Returns an instance of File with properties and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str file_path:
-            Path of file to write to.
-        :param str open_mode:
-            Mode to use when opening the file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool range_get_content_md5:
-            When this parameter is set to True and specified together
-            with a byte range, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Set to 1 to download the file sequentially.
-            Set to 2 or greater if you want to download a file larger than 64MB in chunks.
-            If the file size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        with open(file_path, open_mode) as stream:
-            file = self.get_file_to_stream(
-                share_name, directory_name, file_name, stream,
-                start_range, end_range, range_get_content_md5,
-                progress_callback, max_connections, max_retries,
-                retry_wait, timeout)
-
-        return file
-
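-    # Sketch of a download with progress reporting (names assumed):
-    #
-    #     def report(current, total):
-    #         print('{0} of {1} bytes'.format(current, total))
-    #
-    #     f = svc.get_file_to_path('myshare', 'mydir', 'big.bin', '/tmp/big.bin',
-    #                              progress_callback=report, max_connections=4)
-    #     print(f.properties.content_length)
-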
-    def get_file_to_stream(
-        self, share_name, directory_name, file_name, stream,
-        start_range=None, end_range=None, range_get_content_md5=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, timeout=None):
-        '''
-        Downloads a file to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`File` with properties
-        and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param io.IOBase stream:
-            Opened file/stream to write to.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool range_get_content_md5:
-            When this parameter is set to True and specified together
-            with a byte range, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Set to 1 to download the file sequentially.
-            Set to 2 or greater if you want to download a file larger than 64MB in chunks.
-            If the file size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-
-        if sys.version_info >= (3,) and max_connections > 1 and not stream.seekable():
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        # Only get properties if parallelism will actually be used
-        file_size = None
-        if max_connections > 1 and range_get_content_md5 is None:
-            file = self.get_file_properties(share_name, directory_name, 
-                                            file_name, timeout=timeout)
-            file_size = file.properties.content_length
-
-            # If file size is large, use parallel download
-            if file_size >= self.MAX_SINGLE_GET_SIZE:
-                _download_file_chunks(
-                    self,
-                    share_name,
-                    directory_name,
-                    file_name,
-                    file_size,
-                    self.MAX_CHUNK_GET_SIZE,
-                    start_range,
-                    end_range,
-                    stream,
-                    max_connections,
-                    max_retries,
-                    retry_wait,
-                    progress_callback, 
-                    timeout
-                )
-                return file
-
-        # If parallelism is off or the file is small, do a single download
-        download_size = _get_download_size(start_range, end_range, file_size)
-        if progress_callback:
-            progress_callback(0, download_size)
-
-        file = self._get_file(
-            share_name,
-            directory_name,
-            file_name,
-            start_range=start_range,
-            end_range=end_range,
-            range_get_content_md5=range_get_content_md5,
-            timeout=timeout)
-
-        if file.content is not None:
-            stream.write(file.content)
-
-        if progress_callback:
-            download_size = len(file.content)
-            progress_callback(download_size, download_size)
-
-        file.content = None # Clear file content since output has been written to user stream
-        return file
-
-    def get_file_to_bytes(self, share_name, directory_name, file_name, 
-                          start_range=None, end_range=None, range_get_content_md5=None,
-                          progress_callback=None, max_connections=1, max_retries=5,
-                          retry_wait=1.0, timeout=None):
-        '''
-        Downloads a file as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`File` with
-        properties, metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool range_get_content_md5:
-            When this parameter is set to True and specified together
-            with a byte range, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Set to 1 to download the file sequentially.
-            Set to 2 or greater if you want to download a file larger than 64MB in chunks.
-            If the file size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-
-        stream = BytesIO()
-        file = self.get_file_to_stream(
-            share_name,
-            directory_name,
-            file_name,
-            stream,
-            start_range,
-            end_range,
-            range_get_content_md5,
-            progress_callback,
-            max_connections,
-            max_retries,
-            retry_wait,
-            timeout)
-
-        file.content = stream.getvalue()
-        return file
-
-    def get_file_to_text(
-        self, share_name, directory_name, file_name, encoding='utf-8',
-        start_range=None, end_range=None, range_get_content_md5=None,
-        progress_callback=None, max_connections=1, max_retries=5,
-        retry_wait=1.0, timeout=None):
-        '''
-        Downloads a file as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`File` with properties,
-        metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str encoding:
-            Python encoding to use when decoding the file data.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool range_get_content_md5:
-            When this parameter is set to True and specified together
-            with a byte range, the service returns the MD5 hash for the
-            range, as long as the range is less than or equal to 4 MB in size.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: callback function in format of func(current, total)
-        :param int max_connections:
-            Set to 1 to download the file sequentially.
-            Set to 2 or greater if you want to download a file larger than 64MB in chunks.
-            If the file size does not exceed 64MB it will be downloaded in one chunk.
-        :param int max_retries:
-            Number of times to retry download of file chunk if an error occurs.
-        :param int retry_wait:
-            Sleep time in secs between retries.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('encoding', encoding)
-
-        file = self.get_file_to_bytes(
-            share_name,
-            directory_name,
-            file_name,
-            start_range,
-            end_range,
-            range_get_content_md5,
-            progress_callback,
-            max_connections,
-            max_retries,
-            retry_wait,
-            timeout)
-
-        file.content = file.content.decode(encoding)
-        return file
-
-    def update_range(self, share_name, directory_name, file_name, data, 
-                     start_range, end_range, content_md5=None, timeout=None):
-        '''
-        Writes the bytes specified by the request body into the specified range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bytes data:
-            Content of the range.
-        :param int start_range:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param str content_md5:
-            An MD5 hash of the range content. This hash is used to
-            verify the integrity of the range during transport. When this header
-            is specified, the storage service compares the hash of the content
-            that has arrived with the header value that was sent. If the two
-            hashes do not match, the operation will fail with error code 400
-            (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('data', data)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'range'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('Content-MD5', _to_str(content_md5)),
-            ('x-ms-write', 'update'),
-        ]
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-        request.body = _get_request_body_bytes_only('data', data)
-
-        self._perform_request(request)
-
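-    # Sketch (names assumed): ranges are inclusive, so 512 bytes of payload
-    # cover offsets 0-511; the file must already be at least this large:
-    #
-    #     payload = b'\x00' * 512
-    #     svc.update_range('myshare', 'mydir', 'data.bin', payload,
-    #                      start_range=0, end_range=511)
-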
-    def clear_range(self, share_name, directory_name, file_name, start_range,
-                    end_range, timeout=None):
-        '''
-        Clears the specified range and releases the space used in storage for 
-        that range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'range'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [
-            ('Content-Length', '0'),
-            ('x-ms-write', 'clear'),
-        ]
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-
-        self._perform_request(request)
-
-    def list_ranges(self, share_name, directory_name, file_name,
-                    start_range=None, end_range=None, timeout=None):
-        '''
-        Retrieves the valid ranges for a file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Specifies the start offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of the file.
-        :param int end_range:
-            Specifies the end offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: a list of valid ranges
-        :rtype: a list of :class:`.FileRange`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = [
-            ('comp', 'rangelist'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False)
-
-        response = self._perform_request(request)
-        return _convert_xml_to_ranges(response)
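For anyone migrating off this removed track1 surface, a minimal sketch of driving the three range operations deleted above (share/directory/file names are illustrative; assumes `service` is a `FileService` instance from this v2015_04_05 module):

    # Hypothetical driver for the removed track1 range APIs.
    data = b'x' * 512

    # Write bytes 0-511; ranges are inclusive and capped at 4 MB per call.
    service.update_range('myshare', 'mydir', 'myfile', data,
                         start_range=0, end_range=511)

    # Release the same range so it no longer counts as valid.
    service.clear_range('myshare', 'mydir', 'myfile',
                        start_range=0, end_range=511)

    # Enumerate the ranges that still hold data.
    for file_range in service.list_ranges('myshare', 'mydir', 'myfile'):
        print(file_range.start, file_range.end)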
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/file/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/file/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,394 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._common_conversion import _to_str
-class Share(object):
-
-    '''
-    File share class.
-    
-    :ivar str name:
-        The name of the share.
-    :ivar ShareProperties properties:
-        System properties for the share.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the share as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list shares operation. If this parameter was specified but the 
-        share has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict mapping str to str
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or ShareProperties()
-        self.metadata = metadata
-
-
-class ShareProperties(object):
-
-    '''
-    File share's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        Returns the current share quota in GB.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.quota = None
-
-class Directory(object):
-
-    '''
-    Directory class.
-    
-    :ivar str name:
-        The name of the directory.
-    :ivar DirectoryProperties properties:
-        System properties for the directory.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the directory as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list directory operation. If this parameter was specified but the 
-        directory has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict mapping str to str
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or DirectoryProperties()
-        self.metadata = metadata
-
-class DirectoryProperties(object):
-
-    '''
-    File directory's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-
-class File(object):
-
-    '''
-    File class.
-    
-    :ivar str name:
-        The name of the file.
-    :ivar content:
-        File content.
-    :vartype content: str or bytes
-    :ivar FileProperties properties:
-        System properties for the file.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the file as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list file operation. If this parameter was specified but the 
-        file has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict mapping str to str
-    '''
-
-    def __init__(self, name=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.content = content
-        self.properties = props or FileProperties()
-        self.metadata = metadata
-
-
-class FileProperties(object):
-
-    '''
-    File Properties.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The size of the file in bytes.
-    :ivar ~azure.storage.file.models.ContentSettings content_settings:
-        Stores all the content settings for the file.
-    :ivar ~azure.storage.file.models.CopyProperties copy:
-        Stores all the copy properties for the file.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_settings = ContentSettings()
-        self.copy = CopyProperties()
-
-
-class ContentSettings(object):
-
-    '''
-    Used to store the content settings of a file.
-
-    :ivar str content_type:
-        The content type specified for the file. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If content_encoding has previously been set
-        for the file, that value is stored.
-    :ivar str content_language:
-        If content_language has previously been set
-        for the file, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :ivar str cache_control:
-        If cache_control has previously been set for
-        the file, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-        self, content_type=None, content_encoding=None,
-        content_language=None, content_disposition=None,
-        cache_control=None, content_md5=None):
-        
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return [
-            ('x-ms-cache-control', _to_str(self.cache_control)),
-            ('x-ms-content-type', _to_str(self.content_type)),
-            ('x-ms-content-disposition',
-                _to_str(self.content_disposition)),
-            ('x-ms-content-md5', _to_str(self.content_md5)),
-            ('x-ms-content-encoding',
-                _to_str(self.content_encoding)),
-            ('x-ms-content-language',
-                _to_str(self.content_language)),
-        ]
-
-
-class CopyProperties(object):
-    '''
-    File Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy File operation where this file
-        was the destination file. This header does not appear if this file has never
-        been the destination in a Copy File operation, or if this file has been
-        modified after a concluded Copy File operation using Set File Properties or
-        Put File.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source file used in the last attempted
-        Copy File operation where this file was the destination file. This header does not
-        appear if this file has never been the destination in a Copy File operation, or if
-        this file has been modified after a concluded Copy File operation using
-        Set File Properties or Put File.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending: 
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don’t cause failure.
-            aborted:
-                Copy was ended by Abort Copy File.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy File operation where this file was the destination file. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy File operation where this file was the
-        destination file. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure. 
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class FileRange(object):
-
-    '''
-    File Range.
-    
-    :ivar int start:
-        Byte index for start of file range.
-    :ivar int end:
-        Byte index for end of file range.
-    '''
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-
-class FilePermissions(object):
-
-    '''
-    FilePermissions class to be used with 
-    :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API.
-
-    :ivar FilePermissions FilePermissions.CREATE:
-        Create a new file or copy a file to a new file.
-    :ivar FilePermissions FilePermissions.DELETE: 
-        Delete the file.
-    :ivar FilePermissions FilePermissions.READ:
-        Read the content, properties, metadata. Use the file as the source of a copy 
-        operation.
-    :ivar FilePermissions FilePermissions.WRITE: 
-        Create or write content, properties, metadata. Resize the file. Use the file 
-        as the destination of a copy operation within the same account.
-    '''
-    def __init__(self, read=False, create=False, write=False, delete=False, 
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata. Use the file as the source of a copy 
-            operation.
-        :param bool create:
-            Create a new file or copy a file to a new file.
-        :param bool write: 
-            Create or write content, properties, metadata. Resize the file. Use the file 
-            as the destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the file.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-    
-    def __or__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-FilePermissions.CREATE = FilePermissions(create=True)
-FilePermissions.DELETE = FilePermissions(delete=True)
-FilePermissions.READ = FilePermissions(read=True)
-FilePermissions.WRITE = FilePermissions(write=True)
-
-
-class SharePermissions(object):
-
-    '''
-    SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
-    method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. 
-
-    :ivar SharePermissions SharePermissions.DELETE: 
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use 
-        an account SAS instead.
-    :ivar SharePermissions SharePermissions.LIST: 
-        List files and directories in the share.
-    :ivar SharePermissions SharePermissions.READ:
-        Read the content, properties or metadata of any file in the share. Use any 
-        file in the share as the source of a copy operation.
-    :ivar SharePermissions SharePermissions.WRITE: 
-        For any file in the share, create or write content, properties or metadata. 
-        Resize the file. Use the file as the destination of a copy operation within 
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or 
-        metadata with a service SAS. Use an account SAS instead.
-    '''
-    def __init__(self, read=False, write=False, delete=False, list=False, 
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties or metadata of any file in the share. Use any 
-            file in the share as the source of a copy operation.
-        :param bool write: 
-            For any file in the share, create or write content, properties or metadata. 
-            Resize the file. Use the file as the destination of a copy operation within 
-            the same account.
-            Note: You cannot grant permissions to read or write share properties or 
-            metadata with a service SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any file in the share.
-            Note: You cannot grant permissions to delete a share with a service SAS. Use 
-            an account SAS instead.
-        :param bool list: 
-            List files and directories in the share.
-        :param str _str: 
-            A string representing the permissions
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-    
-    def __or__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') + 
-                ('l' if self.list else ''))
-
-SharePermissions.DELETE = SharePermissions(delete=True)
-SharePermissions.LIST = SharePermissions(list=True)
-SharePermissions.READ = SharePermissions(read=True)
-SharePermissions.WRITE = SharePermissions(write=True)
\ No newline at end of file
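The permission classes deleted above compose by string concatenation: `__str__` renders the flag letters and `__or__`/`__add__` re-parse the concatenated string. A small sketch of that behavior, using only the definitions shown:

    # Combine class-level constants; the result round-trips through 'rw'.
    combined = FilePermissions.READ | FilePermissions.WRITE
    print(str(combined))              # 'rw'

    # Equivalent construction directly from a permission string.
    same = FilePermissions(_str='rw')
    print(same.read, same.write)      # True True

    # SharePermissions renders its letters in r/w/d/l order.
    print(str(SharePermissions(read=True, list=True)))   # 'rl'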
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,535 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-if sys.version_info < (3,):
-    _unicode_type = unicode
-else:
-    _unicode_type = str
-
-from ._error import (
-    _validate_not_none,
-)
-
-class _HeaderDict(dict):
-
-    def __getitem__(self, index):
-        return super(_HeaderDict, self).__getitem__(index.lower())
-
-class _list(list):
-    '''Used so that additional properties can be set on the return list'''
-    pass
-
-class _dict(dict):
-    '''Used so that additional properties can be set on the return dictionary'''
-    pass
-
-class ListGenerator(object):
-    '''
-    A generator object used to list storage resources. The generator will lazily 
-    follow the continuation tokens returned by the service and stop when all 
-    resources have been returned or max_results is reached.
-
-    If max_results is specified and the account has more than that number of 
-    resources, the generator will have a populated next_marker field once it 
-    finishes. This marker can be used to create a new generator if more 
-    results are desired.
-    '''
-    def __init__(self, resources, list_method, list_args, list_kwargs):
-        self.items = resources
-        self.next_marker = resources.next_marker
-
-        self._list_method = list_method
-        self._list_args = list_args
-        self._list_kwargs = list_kwargs
-
-    def __iter__(self):
-        # return results
-        for i in self.items:
-            yield i
-
-        while True:
-            # if no more results on the service, return
-            if not self.next_marker:
-                break
-
-            # update the marker args
-            self._list_kwargs['marker'] = self.next_marker
-
-            # handle max results, if present
-            max_results = self._list_kwargs.get('max_results')
-            if max_results is not None:
-                max_results = max_results - len(self.items)
-
-                # if we've reached max_results, return
-                # else, update the max_results arg
-                if max_results <= 0:
-                    break
-                else:
-                    self._list_kwargs['max_results'] = max_results
-
-            # get the next segment
-            resources = self._list_method(*self._list_args, **self._list_kwargs)
-            self.items = resources
-            self.next_marker = resources.next_marker
-
-            # return results
-            for i in self.items:
-                yield i
-
-class RetentionPolicy(object):
-
-    '''
-    By default, Storage Analytics will not delete any logging or metrics data. Blobs 
-    and table entities will continue to be written until the shared 20TB limit is 
-    reached. Once the 20TB limit is reached, Storage Analytics will stop writing 
-    new data and will not resume until free space is available. This 20TB limit 
-    is independent of the total limit for your storage account.
-
-    There are two ways to delete Storage Analytics data: by manually making deletion 
-    requests or by setting a data retention policy. Manual requests to delete Storage 
-    Analytics data are billable, but delete requests resulting from a retention policy 
-    are not billable.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled: 
-            Indicates whether a retention policy is enabled for the 
-            storage service. If disabled, logging and metrics data will be retained 
-            infinitely by the service unless explicitly deleted.
-        :param int days: 
-            Required if enabled is true. Indicates the number of 
-            days that metrics or logging data should be retained. All data older 
-            than this value will be deleted. The minimum value you can specify is 1; 
-            the largest value is 365 (one year).
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class Logging(object):
-
-    '''
-    Storage Analytics logs detailed information about successful and failed requests 
-    to a storage service. This information can be used to monitor individual requests 
-    and to diagnose issues with a storage service. Requests are logged on a best-effort 
-    basis.
-
-    All logs are stored in block blobs in a container named $logs, which is
-    automatically created when Storage Analytics is enabled for a storage account. 
-    The $logs container is located in the blob namespace of the storage account. 
-    This container cannot be deleted once Storage Analytics has been enabled, though 
-    its contents can be deleted.
-
-    For more information, see  https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
-    '''
-
-    def __init__(self, delete=False, read=False, write=False,
-                 retention_policy=None):
-        '''
-        :param bool delete: 
-            Indicates whether all delete requests should be logged.
-        :param bool read: 
-            Indicates whether all read requests should be logged.
-        :param bool write: 
-            Indicates whether all write requests should be logged.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("read", read)
-        _validate_not_none("write", write)
-        _validate_not_none("delete", delete)
-
-        self.version = u'1.0'
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class Metrics(object):
-
-    ''' 
-    Metrics include aggregated transaction statistics and capacity data about requests 
-    to a storage service. Transactions are reported at both the API operation level 
-    as well as at the storage service level, and capacity is reported at the storage 
-    service level. Metrics data can be used to analyze storage service usage, diagnose 
-    issues with requests made against the storage service, and to improve the 
-    performance of applications that use a service.
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
-    '''
-
-    def __init__(self, enabled=False, include_apis=None,
-                 retention_policy=None):
-        '''
-        :param bool enabled: 
-            Indicates whether metrics are enabled for 
-            the service.
-        :param bool include_apis: 
-            Required if enabled is True. Indicates whether metrics 
-            should generate summary statistics for called API operations.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("include_apis", include_apis)
-
-        self.version = u'1.0'
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class CorsRule(object):
-
-    '''
-    CORS is an HTTP feature that enables a web application running under one domain 
-    to access resources in another domain. Web browsers implement a security 
-    restriction known as same-origin policy that prevents a web page from calling 
-    APIs in a different domain; CORS provides a secure way to allow one domain 
-    (the origin domain) to call APIs in another domain. 
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
-                 exposed_headers=None, allowed_headers=None):
-        '''
-        :param allowed_origins: 
-            A list of origin domains that will be allowed via CORS, or "*" to allow 
-            all domains. The list must contain at least one entry. Limited to 64 
-            origin domains. Each allowed origin can have up to 256 characters.
-        :type allowed_origins: list of str
-        :param allowed_methods:
-            A list of HTTP methods that are allowed to be executed by the origin. 
-            The list must contain at least one entry. For Azure Storage, 
-            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-        :type allowed_methods: list of str
-        :param int max_age_in_seconds:
-            The number of seconds that the client/browser should cache a 
-            preflight response.
-        :param exposed_headers:
-            Defaults to an empty list. A list of response headers to expose to CORS 
-            clients. Limited to 64 defined headers and two prefixed headers. Each 
-            header can be up to 256 characters.
-        :type exposed_headers: list of str
-        :param allowed_headers:
-            Defaults to an empty list. A list of headers allowed to be part of 
-            the cross-origin request. Limited to 64 defined headers and 2 prefixed 
-            headers. Each header can be up to 256 characters.
-        :type allowed_headers: list of str
-        '''
-        _validate_not_none("allowed_origins", allowed_origins)
-        _validate_not_none("allowed_methods", allowed_methods)
-        _validate_not_none("max_age_in_seconds", max_age_in_seconds)
-
-        self.allowed_origins = allowed_origins if allowed_origins else list()
-        self.allowed_methods = allowed_methods if allowed_methods else list()
-        self.max_age_in_seconds = max_age_in_seconds
-        self.exposed_headers = exposed_headers if exposed_headers else list()
-        self.allowed_headers = allowed_headers if allowed_headers else list()
-
-
-class ServiceProperties(object):
-    ''' 
-    Returned by get_*_service_properties functions. Contains the properties of a 
-    storage service, including Analytics and CORS rules.
-
-    Azure Storage Analytics performs logging and provides metrics data for a storage 
-    account. You can use this data to trace requests, analyze usage trends, and 
-    diagnose issues with your storage account. To use Storage Analytics, you must 
-    enable it individually for each service you want to monitor.
-
-    The aggregated data is stored in a well-known blob (for logging) and in well-known 
-    tables (for metrics), which may be accessed using the Blob service and Table 
-    service APIs.
-
-    For an in-depth guide on using Storage Analytics and other tools to identify, 
-    diagnose, and troubleshoot Azure Storage-related issues, see 
-    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
-
-    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    pass
-
-
-class AccessPolicy(object):
-
-    ''' 
-    Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and 
-    permissions for the Shared Access Signatures with which it's associated. 
-    Depending on how you want to control access to your table resource, you can 
-    specify all of these parameters within the stored access policy, and omit 
-    them from the URL for the Shared Access Signature. Doing so permits you to 
-    modify the associated signature's behavior at any time, as well as to revoke 
-    it. Or you can specify one or more of the access policy parameters within 
-    the stored access policy, and the others on the URL. Finally, you can 
-    specify all of the parameters on the URL. In this case, you can use the 
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must 
-    include all fields required to authenticate the signature. If any required 
-    fields are missing, the request will fail. Likewise, if a field is specified 
-    both in the Shared Access Signature URL and in the stored access policy, the 
-    request will fail with status code 400 (Bad Request).
-    '''
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        '''
-        :param str permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        '''
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class Protocol(object):
-    '''
-    Specifies the protocol permitted for a SAS token. Note that HTTP only is 
-    not allowed.
-    '''
-
-    HTTPS = 'https'
-    ''' Allow HTTPS requests only. '''
-
-    HTTPS_HTTP = 'https,http'
-    ''' Allow HTTP and HTTPS requests. '''
-
-
-class ResourceTypes(object):
-
-    '''
-    Specifies the resource types that are accessible with the account SAS.
-
-    :ivar ResourceTypes ResourceTypes.CONTAINER:
-        Access to container-level APIs (e.g., Create/Delete Container, 
-        Create/Delete Queue, Create/Delete Table, Create/Delete Share, 
-        List Blobs/Files and Directories) 
-    :ivar ResourceTypes ResourceTypes.OBJECT:
-        Access to object-level APIs for blobs, queue messages, table entities, and 
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) 
-    :ivar ResourceTypes ResourceTypes.SERVICE:
-        Access to service-level APIs (e.g., Get/Set Service Properties, 
-        Get Service Stats, List Containers/Queues/Tables/Shares) 
-    '''
-    def __init__(self, service=False, container=False, object=False, _str=None):
-        '''
-        :param bool service:
-            Access to service-level APIs (e.g., Get/Set Service Properties, 
-            Get Service Stats, List Containers/Queues/Tables/Shares) 
-        :param bool container:
-            Access to container-level APIs (e.g., Create/Delete Container, 
-            Create/Delete Queue, Create/Delete Table, Create/Delete Share, 
-            List Blobs/Files and Directories) 
-        :param bool object:
-            Access to object-level APIs for blobs, queue messages, table entities, and 
-            files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) 
-        :param str _str: 
-            A string representing the resource types.
-        '''
-        if not _str:
-            _str = ''
-        self.service = service or ('s' in _str)
-        self.container = container or ('c' in _str)
-        self.object = object or ('o' in _str)
-    
-    def __or__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-ResourceTypes.SERVICE = ResourceTypes(service=True)
-ResourceTypes.CONTAINER = ResourceTypes(container=True)
-ResourceTypes.OBJECT = ResourceTypes(object=True)
-
-
-class Services(object):
-
-    '''
-    Specifies the services accessible with the account SAS.
-
-    :ivar Services Services.BLOB: The blob service.
-    :ivar Services Services.FILE: The file service 
-    :ivar Services Services.QUEUE: The queue service.
-    :ivar Services Services.TABLE: The table service 
-    '''
-    def __init__(self, blob=False, queue=False, table=False, file=False, _str=None):
-        '''
-        :param bool blob:
-            Access to any blob service, for example, the `.BlockBlobService`
-        :param bool queue:
-            Access to the `.QueueService`
-        :param bool table:
-            Access to the `.TableService`
-        :param bool file:
-            Access to the `.FileService`
-        :param str _str: 
-            A string representing the services.
-        '''
-        if not _str:
-            _str = ''
-        self.blob = blob or ('b' in _str)
-        self.queue = queue or ('q' in _str)
-        self.table = table or ('t' in _str)
-        self.file = file or ('f' in _str)
-    
-    def __or__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Services(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('t' if self.table else '') +
-                ('f' if self.file else ''))
-
-Services.BLOB = Services(blob=True)
-Services.QUEUE = Services(queue=True)
-Services.TABLE = Services(table=True)
-Services.FILE = Services(file=True)
-
-
-class AccountPermissions(object):
-
-    '''
-    :class:`~AccountPermissions` class to be used with the generate_shared_access_signature 
-    method and for the AccessPolicies used with set_*_acl. There are two types of 
-    SAS which may be used to grant resource access. One is to grant access to a 
-    specific resource (resource-specific). Another is to grant access to the 
-    entire service for a specific account and allow certain operations based on 
-    perms found here.
-
-    :ivar AccountPermissions AccountPermissions.ADD:
-        Valid for the following Object resource types only: queue messages, table 
-        entities, and append blobs. 
-    :ivar AccountPermissions AccountPermissions.CREATE:
-        Valid for the following Object resource types only: blobs and files. Users 
-        can create new blobs or files, but may not overwrite existing blobs or files. 
-    :ivar AccountPermissions AccountPermissions.DELETE:
-        Valid for Container and Object resource types, except for queue messages. 
-    :ivar AccountPermissions AccountPermissions.LIST:
-        Valid for Service and Container resource types only. 
-    :ivar AccountPermissions AccountPermissions.PROCESS:
-        Valid for the following Object resource type only: queue messages. 
-    :ivar AccountPermissions AccountPermissions.READ:
-        Valid for all signed resource types (Service, Container, and Object). 
-        Permits read permissions to the specified resource type. 
-    :ivar AccountPermissions AccountPermissions.UPDATE:
-        Valid for the following Object resource types only: queue messages and table 
-        entities. 
-    :ivar AccountPermissions AccountPermissions.WRITE:
-        Valid for all signed resource types (Service, Container, and Object). 
-        Permits write permissions to the specified resource type. 
-    '''
-    def __init__(self, read=False, write=False, delete=False, list=False, 
-                 add=False, create=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Valid for all signed resource types (Service, Container, and Object). 
-            Permits read permissions to the specified resource type.
-        :param bool write:
-            Valid for all signed resource types (Service, Container, and Object). 
-            Permits write permissions to the specified resource type.
-        :param bool delete: 
-            Valid for Container and Object resource types, except for queue messages.
-        :param bool list:
-            Valid for Service and Container resource types only.
-        :param bool add:
-            Valid for the following Object resource types only: queue messages, 
-            table entities, and append blobs.
-        :param bool create:
-            Valid for the following Object resource types only: blobs and files. 
-            Users can create new blobs or files, but may not overwrite existing 
-            blobs or files.
-        :param bool update:
-            Valid for the following Object resource types only: queue messages and 
-            table entities.
-        :param bool process:
-            Valid for the following Object resource type only: queue messages.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-    
-    def __or__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-AccountPermissions.READ = AccountPermissions(read=True)
-AccountPermissions.WRITE = AccountPermissions(write=True)
-AccountPermissions.DELETE = AccountPermissions(delete=True)
-AccountPermissions.LIST = AccountPermissions(list=True)
-AccountPermissions.ADD = AccountPermissions(add=True)
-AccountPermissions.CREATE = AccountPermissions(create=True)
-AccountPermissions.UPDATE = AccountPermissions(update=True)
-AccountPermissions.PROCESS = AccountPermissions(process=True)
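`ResourceTypes`, `Services`, and `AccountPermissions` supply the three string fields of an account SAS, and each serializes to its flag letters. A brief sketch (assuming the `__or__`/`__add__` return-type fix above, so combining permissions yields an `AccountPermissions`):

    # Account-SAS components rendered to their wire strings.
    resource_types = ResourceTypes.SERVICE | ResourceTypes.OBJECT    # 'so'
    services = Services.BLOB | Services.FILE                         # 'bf'
    permissions = AccountPermissions.READ | AccountPermissions.LIST  # 'rl'

    print(str(resource_types), str(services), str(permissions))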
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .models import (
-    Queue,
-    QueueMessage,
-    QueuePermissions,
-    QueueMessageFormat,
-)
-
-from .queueservice import QueueService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,144 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from dateutil import parser
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    Queue,
-    QueueMessage,
-)
-from ..models import (
-    _list,
-)
-from .._deserialization import (
-    _int_to_str,
-    _parse_response_for_dict,
-    _parse_metadata,
-)
-
-def _parse_metadata_and_message_count(response):
-    '''
-    Extracts approximate messages count header.
-    '''
-    metadata = _parse_metadata(response)
-
-    headers = _parse_response_for_dict(response)
-    metadata.approximate_message_count = _int_to_str(headers.get('x-ms-approximate-messages-count'))
-
-    return metadata
-
-def _parse_queue_message_from_headers(response):
-    '''
-    Extracts pop receipt and time next visible from headers.
-    '''
-    headers = _parse_response_for_dict(response)
-
-    message = QueueMessage()
-    message.pop_receipt = headers.get('x-ms-popreceipt')
-    message.time_next_visible = parser.parse(headers.get('x-ms-time-next-visible'))
-    
-    return message
-
-def _convert_xml_to_queues(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Queues>
-        <Queue>
-          <Name>string-value</Name>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Queue>
-      </Queues>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    queues = _list()
-    list_element = ETree.fromstring(response.body)
-    
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(queues, 'next_marker', next_marker)
-
-    queues_element = list_element.find('Queues')
-
-    for queue_element in queues_element.findall('Queue'):
-        # Name element
-        queue = Queue()
-        queue.name = queue_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = queue_element.find('Metadata')
-        if metadata_root_element is not None:
-            queue.metadata = dict()
-            for metadata_element in metadata_root_element:
-                queue.metadata[metadata_element.tag] = metadata_element.text
-        
-        # Add queue to list
-        queues.append(queue)
-
-    return queues
-
-def _convert_xml_to_queue_messages(response, decode_function):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessagesList>
-        <QueueMessage>
-          <MessageId>string-message-id</MessageId>
-          <InsertionTime>insertion-time</InsertionTime>
-          <ExpirationTime>expiration-time</ExpirationTime>
-          <PopReceipt>opaque-string-receipt-data</PopReceipt>
-          <TimeNextVisible>time-next-visible</TimeNextVisible>
-          <DequeueCount>integer</DequeueCount>
-          <MessageText>message-body</MessageText>
-        </QueueMessage>
-    </QueueMessagesList>
-    '''
-    if response is None or response.body is None:
-        return response
-
-    messages = list()
-    list_element = ETree.fromstring(response.body)
-
-    for message_element in list_element.findall('QueueMessage'):
-        message = QueueMessage()
-
-        message.id = message_element.findtext('MessageId')
-        message.dequeue_count = message_element.findtext('DequeueCount')
-
-        message.content = decode_function(message_element.findtext('MessageText'))
-
-        message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
-        message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
-        
-        message.pop_receipt = message_element.findtext('PopReceipt')
-
-        time_next_visible = message_element.find('TimeNextVisible')
-        if time_next_visible is not None:
-            message.time_next_visible = parser.parse(time_next_visible.text)
-
-        # Add message to list
-        messages.append(message)
-
-    return messages
\ No newline at end of file
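The parser deleted above only needs an object with a `body` attribute, so it can be exercised in isolation with a stand-in response (the `_Resp` shim and the sample values are purely illustrative):

    # Feed the documented XML shape through _convert_xml_to_queue_messages.
    class _Resp(object):
        body = (b'<?xml version="1.0" encoding="utf-8"?>'
                b'<QueueMessagesList><QueueMessage>'
                b'<MessageId>a1b2</MessageId>'
                b'<InsertionTime>Fri, 09 Oct 2015 21:04:30 GMT</InsertionTime>'
                b'<ExpirationTime>Fri, 16 Oct 2015 21:04:30 GMT</ExpirationTime>'
                b'<PopReceipt>AgAAAA==</PopReceipt>'
                b'<TimeNextVisible>Fri, 09 Oct 2015 23:29:20 GMT</TimeNextVisible>'
                b'<DequeueCount>1</DequeueCount>'
                b'<MessageText>hello</MessageText>'
                b'</QueueMessage></QueueMessagesList>')

    messages = _convert_xml_to_queue_messages(_Resp(), lambda text: text)
    print(messages[0].id, messages[0].content)   # a1b2 hello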
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_error.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,33 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-from .._error import (
-    _validate_type_bytes,
-)
-
-_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
-_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
-_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
-
-def _validate_message_type_text(param):
-    if sys.version_info < (3,):
-        if not isinstance(param, unicode):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)
-    else:
-        if not isinstance(param, str):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
-
-def _validate_message_type_bytes(param):
-    _validate_type_bytes('message', param)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_serialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,76 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    try:
-        from cStringIO import StringIO as BytesIO
-    except:
-        from StringIO import StringIO as BytesIO
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from xml.sax.saxutils import escape as xml_escape
-from .._common_conversion import (
-    _str,
-)
-
-def _get_path(queue_name=None, include_messages=None, message_id=None):
-    '''
-    Creates the path to access a queue resource.
-
-    queue_name:
-        Name of queue.
-    include_messages:
-        Whether or not to include messages.
-    message_id:
-        Message id.
-    '''
-    if queue_name and include_messages and message_id:
-        return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
-    if queue_name and include_messages:
-        return '/{0}/messages'.format(_str(queue_name))
-    elif queue_name:
-        return '/{0}'.format(_str(queue_name))
-    else:
-        return '/'
-
-
-def _convert_queue_message_xml(message_text, encode_function):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessage>
-        <MessageText></MessageText>
-    </QueueMessage>
-    '''
-    queue_message_element = ETree.Element('QueueMessage')
-
-    # Encode the message text and set it on the element
-    message_text = encode_function(message_text)
-    ETree.SubElement(queue_message_element, 'MessageText').text = message_text
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
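With an identity encode function (mirroring `QueueMessageFormat.noencode`), the serializer deleted above emits roughly the following bytes; a sketch:

    payload = _convert_queue_message_xml('hello, queue', lambda text: text)
    print(payload)
    # b"<?xml version='1.0' encoding='utf-8'?>\n<QueueMessage>
    #   <MessageText>hello, queue</MessageText></QueueMessage>"
    # (wrapped here for readability; the actual output is a single line)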
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,246 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from xml.sax.saxutils import escape as xml_escape
-from xml.sax.saxutils import unescape as xml_unescape
-from base64 import (
-    b64encode,
-    b64decode,
-)
-from ._error import (
-    _validate_message_type_bytes,
-    _validate_message_type_text,
-    _ERROR_MESSAGE_NOT_BASE64,
-)
-
-class Queue(object):
-
-    ''' 
-    Queue class.
-     
-    :ivar str name: 
-        The name of the queue.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the queue as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list queues operation. If this parameter was specified but the 
-        queue has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict mapping str to str
-    '''
-
-    def __init__(self):
-        self.name = None
-        self.metadata = None
-
-
-class QueueMessage(object):
-    ''' 
-    Queue message class. 
-
-    :ivar str id: 
-        A GUID value assigned to the message by the Queue service that 
-        identifies the message in the queue. This value may be used together 
-        with the value of pop_receipt to delete a message from the queue after 
-        it has been retrieved with the get messages operation. 
-    :ivar date insertion_time: 
-        A UTC date value representing the time the message was inserted.
-    :ivar date expiration_time: 
-        A UTC date value representing the time the message expires.
-    :ivar int dequeue_count: 
-        Begins with a value of 1 the first time the message is dequeued. This 
-        value is incremented each time the message is subsequently dequeued.
-    :ivar obj content: 
-        The message content. Type is determined by the decode_function set on 
-        the service. Default is str.
-    :ivar str pop_receipt: 
-        A receipt str which can be used together with the message_id element to 
-        delete a message from the queue after it has been retrieved with the get 
-        messages operation. Only returned by get messages operations. Set to 
-        None for peek messages.
-    :ivar date time_next_visible: 
-        A UTC date value representing the time the message will next be visible. 
-        Only returned by get messages operations. Set to None for peek messages.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.insertion_time = None
-        self.expiration_time = None
-        self.dequeue_count = None
-        self.content = None
-        self.pop_receipt = None
-        self.time_next_visible = None
-
-
-class QueueMessageFormat:
-    ''' 
-    Encoding and decoding methods which can be used to modify how the queue service 
-    encodes and decodes queue messages. Set these to queueservice.encode_function 
-    and queueservice.decode_function to modify the behavior. The defaults are 
-    text_xmlencode and text_xmldecode, respectively.
-    '''
-
-    @staticmethod
-    def text_base64encode(data):
-        '''
-        Base64 encode unicode text.
-        
-        :param str data: String to encode.
-        :return: Base64 encoded string.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return b64encode(data.encode('utf-8')).decode('utf-8')
-     
-    @staticmethod
-    def text_base64decode(data):   
-        '''
-        Base64 decode to unicode text.
-        
-        :param str data: String data to decode to unicode.
-        :return: Base64 decoded string.
-        :rtype: str
-        ''' 
-        try:
-            return b64decode(data.encode('utf-8')).decode('utf-8')
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def binary_base64encode(data):
-        '''
-        Base64 encode byte strings.
-        
-        :param str data: Binary string to encode.
-        :return: Base64 encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_bytes(data)
-        return b64encode(data).decode('utf-8')
-     
-    @staticmethod
-    def binary_base64decode(data):
-        '''
-        Base64 decode to byte string.
-        
-        :param str data: Data to decode to a byte string.
-        :return: Base64 decoded data.
-        :rtype: str
-        ''' 
-        try:
-            return b64decode(data.encode('utf-8'))
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def text_xmlencode(data):
-        ''' 
-        XML encode unicode text.
-
-        :param str data: Unicode string to encode
-        :return: XML encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return xml_escape(data)
-       
-    @staticmethod 
-    def text_xmldecode(data):
-        ''' 
-        XML decode to unicode text.
-
-        :param str data: Data to decode to unicode.
-        :return: XML decoded data.
-        :rtype: str
-        '''
-        return xml_unescape(data)
-
-    @staticmethod
-    def noencode(data):
-        ''' 
-        Do no encoding. 
-
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str
-        '''
-        return data
-        
-    @staticmethod
-    def nodecode(data):
-        '''
-        Do no decoding.
-        
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str        
-        '''
-        return data
-
-
-class QueuePermissions(object):
-
-    '''
-    QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature`
-    method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. 
-
-    :ivar QueuePermissions QueuePermissions.READ: 
-        Read metadata and properties, including message count. Peek at messages. 
-    :ivar QueuePermissions QueuePermissions.ADD: 
-        Add messages to the queue.
-    :ivar QueuePermissions QueuePermissions.UPDATE:
-        Update messages in the queue. Note: Use the Process permission with 
-        Update so you can first get the message you want to update.
-    :ivar QueuePermissions QueuePermissions.PROCESS:
-        Get and delete messages from the queue.
-    '''
-    def __init__(self, read=False, add=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Read metadata and properties, including message count. Peek at messages.
-        :param bool add:
-            Add messages to the queue.
-        :param bool update:
-            Update messages in the queue. Note: Use the Process permission with 
-            Update so you can first get the message you want to update.
-        :param bool process: 
-            Get and delete messages from the queue.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-    
-    def __or__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-QueuePermissions.READ = QueuePermissions(read=True)
-QueuePermissions.ADD = QueuePermissions(add=True)
-QueuePermissions.UPDATE = QueuePermissions(update=True)
-QueuePermissions.PROCESS = QueuePermissions(process=True)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/queueservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/queueservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/queue/queueservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/queue/queueservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,878 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from azure.common import (
-    AzureConflictHttpError,
-    AzureHttpError,
-)
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from .._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_CONFLICT,
-    _ERROR_STORAGE_MISSING_INFO,
-)
-from .._serialization import (
-    _get_request_body,
-)
-from .._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from .._http import (
-    HTTPRequest,
-)
-from ..models import (
-    Services,
-    ListGenerator,
-)
-from .models import (
-    QueueMessageFormat,
-)
-from .._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-)
-from .._connection import _ServiceParameters
-from .._serialization import (
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from .._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-)
-from ._serialization import (
-    _convert_queue_message_xml,
-    _get_path,
-)
-from ._deserialization import (
-    _convert_xml_to_queues,
-    _convert_xml_to_queue_messages,
-    _parse_queue_message_from_headers,
-    _parse_metadata_and_message_count,
-)
-from ..sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from ..storageclient import StorageClient
-
-
-_HTTP_RESPONSE_NO_CONTENT = 204
-
-class QueueService(StorageClient):
-
-    '''
-    This is the main class managing queue resources.
-
-    The Queue service stores messages. A queue can contain an unlimited number of 
-    messages, each of which can be up to 64KB in size. Messages are generally added 
-    to the end of the queue and retrieved from the front of the queue, although 
-    first in, first out (FIFO) behavior is not guaranteed.
-
-    :ivar function(data) encode_function: 
-        A function used to encode queue messages. Takes as 
-        a parameter the data passed to the put_message API and returns the encoded 
-        message. Defaults to text and xml encoding, but bytes and other 
-        encodings can be used. For example, base64 may be preferable for developing 
-        across multiple Azure Storage libraries in different languages. See the 
-        :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and 
-        no encoding methods as well as binary equivalents.
-    :ivar function(data) decode_function: 
-        A function used to decode queue messages. Takes as 
-        a parameter the data returned by the get_messages and peek_messages APIs and 
-        returns the decoded message. Defaults to text and xml decoding, but 
-        bytes and other decodings can be used. For example, base64 may be preferable 
-        for developing across multiple Azure Storage libraries in different languages. 
-        See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 
-        and no decoding methods as well as binary equivalents.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'queue',
-            account_name=account_name, 
-            account_key=account_key, 
-            sas_token=sas_token, 
-            is_emulated=is_emulated, 
-            protocol=protocol, 
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string)
-            
-        super(QueueService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        self.encode_function = QueueMessageFormat.text_xmlencode
-        self.decode_function = QueueMessageFormat.text_xmldecode
-
-    def generate_account_shared_access_signature(self, resource_types, permission, 
-                                        expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue service.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.QUEUE, resource_types, permission, 
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_queue_shared_access_signature(self, queue_name,
-                                         permission=None, 
-                                         expiry=None,                                       
-                                         start=None,
-                                         id=None,
-                                         ip=None, protocol=None,):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            The name of the queue to create a SAS token for.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_queue_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_queue(
-            queue_name,
-            permission=permission, 
-            expiry=expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-        )
-
-    def get_queue_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Queue service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The queue service properties.
-        :rtype: :class:`~azure.storage.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        response = self._perform_request(request)
-
-        return _convert_xml_to_service_properties(response.body)
-
-    def set_queue_service_properties(self, logging=None, hour_metrics=None, 
-                                    minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list of :class:`~azure.storage.models.CorsRule`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-        self._perform_request(request)
-
-    def list_queues(self, prefix=None, num_results=None, include_metadata=False, 
-                    marker=None, timeout=None):
-        '''
-        Returns a generator to list the queues. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all queues 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        queues, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param int num_results:
-            The maximum number of queues to return.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        '''
-        include = 'metadata' if include_metadata else None
-        kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, 
-                  'marker': marker, 'timeout': timeout}
-        resp = self._list_queues(**kwargs)
-
-        return ListGenerator(resp, self._list_queues, (), kwargs)
-
-    def _list_queues(self, prefix=None, marker=None, max_results=None,
-                    include=None, timeout=None):
-        '''
-        Returns a list of queues under the specified account. Makes a single list 
-        request to the service. Used internally by the list_queues method.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param str marker:
-            A token which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            queues. The marker value is opaque to the client.
-        :param int max_results:
-            The maximum number of queues to return. A single list request may 
-            return up to 1000 queues and potentially a continuation token which 
-            should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the queue's
-            metadata be returned as part of the response body.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path()
-        request.query = [
-            ('comp', 'list'),
-            ('prefix', _to_str(prefix)),
-            ('marker', _to_str(marker)),
-            ('maxresults', _int_to_str(max_results)),
-            ('include', _to_str(include)),
-            ('timeout', _int_to_str(timeout))
-        ]
-        response = self._perform_request(request)
-
-        return _convert_xml_to_queues(response)
-
-    def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a queue under the given account.
-
-        :param str queue_name:
-            The name of the queue to create. A queue name must be from 3 through 
-            63 characters long and may only contain lowercase letters, numbers, 
-            and the dash (-) character. The first and last letters in the queue 
-            must be alphanumeric. The dash (-) character cannot be the first or 
-            last character. Consecutive dash characters are not permitted in the 
-            queue name.
-        :param metadata:
-            A dict containing name-value pairs to associate with the queue as 
-            metadata. Note that metadata names preserve the case with which they 
-            were created, but are case-insensitive when set or read. 
-        :type metadata: a dict mapping str to str 
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the queue already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-        if not fail_on_exist:
-            try:
-                response = self._perform_request(request)
-                if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                    return False
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            response = self._perform_request(request)
-            if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                raise AzureConflictHttpError(
-                    _ERROR_CONFLICT.format(response.message), response.status)
-            return True
-
-    def delete_queue(self, queue_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The queue is later removed from 
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the queue while it is being deleted, 
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str queue_name:
-            The name of the queue to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the queue doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [('timeout', _int_to_str(timeout))]
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_queue_metadata(self, queue_name, timeout=None):
-        '''
-        Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A dictionary representing the queue metadata with an 
-            approximate_message_count int property on the dict estimating the 
-            number of messages in the queue.
-        :rtype: a dict mapping str to str
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        response = self._perform_request(request)
-
-        return _parse_metadata_and_message_count(response)
-
-    def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param dict metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [
-            ('comp', 'metadata'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.headers = [('x-ms-meta-name-values', metadata)]
-        self._perform_request(request)
-
-    def exists(self, queue_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the queue exists.
-
-        :param str queue_name:
-            The name of queue to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the queue exists.
-        :rtype: bool
-        '''
-        try:
-            self.get_queue_metadata(queue_name, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def get_queue_acl(self, queue_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        response = self._perform_request(request)
-
-        return _convert_xml_to_signed_identifiers(response.body)
-
-    def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the queue that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a queue, the existing permissions are replaced. 
-        To update the queue’s permissions, call :func:`~get_queue_acl` to fetch 
-        all access policies associated with the queue, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the queue. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name)
-        request.query = [
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-        self._perform_request(request)
-
-    def put_message(self, queue_name, content, visibility_timeout=None,
-                    time_to_live=None, timeout=None):
-        '''
-        Adds a new message to the back of the message queue. 
-
-        The visibility timeout specifies the time that the message will be 
-        invisible. After the timeout expires, the message will become visible. 
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the 
-        queue. The message will be deleted from the queue when the time-to-live 
-        period expires.
-
-        :param str queue_name:
-            The name of the queue to put the message into.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str. The encoded message can be up to 
-            64KB in size.
-        :param int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :param int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The maximum time-to-live allowed is 7 days. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('content', content)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True)
-        request.query = [
-            ('visibilitytimeout', _to_str(visibility_timeout)),
-            ('messagettl', _to_str(time_to_live)),
-            ('timeout', _int_to_str(timeout))
-        ]
-        request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function))
-        self._perform_request(request)
-
-    def get_messages(self, queue_name, num_messages=None,
-                     visibility_timeout=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message 
-        content and a pop_receipt value, which is required to delete the message. 
-        The message is not automatically deleted from the queue, but after it has 
-        been retrieved, it is not visible to other clients for the time interval 
-        specified by the visibility_timeout parameter.
-
-        :param str queue_name:
-            The name of the queue to get messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds, relative
-            to server time. The new value must be larger than or equal to 1
-            second, and cannot be larger than 7 days. The visibility timeout of 
-            a message can be set to a value later than the expiry time.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects.
-        :rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True)
-        request.query = [
-            ('numofmessages', _to_str(num_messages)),
-            ('visibilitytimeout', _to_str(visibility_timeout)),
-            ('timeout', _int_to_str(timeout))
-        ]
-        response = self._perform_request(request)
-
-        return _convert_xml_to_queue_messages(response, self.decode_function)
-
-    def peek_messages(self, queue_name, num_messages=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved 
-        for the first time with a call to get_messages, its dequeue_count property 
-        is set to 1. If it is not deleted and is subsequently retrieved again, the 
-        dequeue_count property is incremented. The client may use this value to 
-        determine how many times a message has been retrieved. Note that a call 
-        to peek_messages does not increment the value of DequeueCount, but returns 
-        this value for the client to read.
-
-        :param str queue_name:
-            The name of the queue to peek messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that 
-            time_next_visible and pop_receipt will not be populated as peek does 
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True)
-        request.query = [
-            ('peekonly', 'true'),
-            ('numofmessages', _to_str(num_messages)),
-            ('timeout', _int_to_str(timeout))]
-        response = self._perform_request(request)
-
-        return _convert_xml_to_queue_messages(response, self.decode_function)
-
-    def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
-        '''
-        Deletes the specified message.
-
-        Normally after a client retrieves a message with the get_messages operation, 
-        the client is expected to process and delete the message. To delete the 
-        message, you must have two items of data: id and pop_receipt. The 
-        id is returned from the previous get_messages operation. The 
-        pop_receipt is returned from the most recent :func:`~get_messages` or 
-        :func:`~update_message` operation. In order for the delete_message operation 
-        to succeed, the pop_receipt specified on the request must match the 
-        pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` 
-        operation. 
-
-        :param str queue_name:
-            The name of the queue from which to delete the message.
-        :param str message_id:
-            The message id identifying the message to delete.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = [
-            ('popreceipt', _to_str(pop_receipt)),
-            ('timeout', _int_to_str(timeout))]
-        self._perform_request(request)
-
-    def clear_messages(self, queue_name, timeout=None):
-        '''
-        Deletes all messages from the specified queue.
-
-        :param str queue_name:
-            The name of the queue whose messages to clear.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True)
-        request.query = [('timeout', _int_to_str(timeout))]
-        self._perform_request(request)
-
-    def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout, 
-                       content=None, timeout=None):
-        '''
-        Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a 
-        queue message. This functionality can be useful if you want a worker role 
-        to “lease” a queue message. For example, if a worker role calls get_messages 
-        and recognizes that it needs more time to process a message, it can 
-        continually extend the message’s invisibility until it is processed. If 
-        the worker role were to fail during processing, eventually the message 
-        would become visible again and another worker role could process it.
-
-        :param str queue_name:
-            The name of the queue containing the message to update.
-        :param str message_id:
-            The message id identifying the message to update.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that 
-            only time_next_visible and pop_receipt will be populated.
-        :rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        _validate_not_none('visibility_timeout', visibility_timeout)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = [
-            ('popreceipt', _to_str(pop_receipt)),
-            ('visibilitytimeout', _int_to_str(visibility_timeout)),
-            ('timeout', _int_to_str(timeout))
-        ]
-
-        if content is not None:
-            request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function))
-
-        response = self._perform_request(request)
-        return _parse_queue_message_from_headers(response)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,668 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from datetime import date
-
-from ._common_conversion import (
-    _sign_string,
-    _to_str,
-)
-from ._serialization import (
-    url_quote,
-    _to_utc_datetime,
-)
-from ._constants import X_MS_VERSION
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating blob, queue, table, and file shared access 
-    signature tokens with a common account name and account key. Users can either 
-    use the factory or can construct the appropriate service and use the 
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-
-    def generate_table(self, table_name, permission=None, 
-                        expiry=None, start=None, id=None,
-                        ip=None, protocol=None,
-                        start_pk=None, start_rk=None, 
-                        end_pk=None, end_rk=None):
-        '''
-        Generates a shared access signature for the table.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param str table_name:
-            Name of table.
-        :param TablePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_table_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        :param str start_pk:
-            The minimum partition key accessible with this shared access 
-            signature. startpk must accompany startrk. Key values are inclusive. 
-            If omitted, there is no lower bound on the table entities that can 
-            be accessed.
-        :param str start_rk:
-            The minimum row key accessible with this shared access signature. 
-            startpk must accompany startrk. Key values are inclusive. If 
-            omitted, there is no lower bound on the table entities that can be 
-            accessed.
-        :param str end_pk:
-            The maximum partition key accessible with this shared access 
-            signature. endpk must accompany endrk. Key values are inclusive. If 
-            omitted, there is no upper bound on the table entities that can be 
-            accessed.
-        :param str end_rk:
-            The maximum row key accessible with this shared access signature. 
-            endpk must accompany endrk. Key values are inclusive. If omitted, 
-            there is no upper bound on the table entities that can be accessed.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
-        sas.add_resource_signature(self.account_name, self.account_key, 'table', table_name)
-
-        return sas.get_token()
-
-    def generate_queue(self, queue_name, permission=None, 
-                        expiry=None, start=None, id=None,
-                        ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            Name of queue.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, add, update, process.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_queue_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
-
-        return sas.get_token()
-
-    def generate_blob(self, container_name, blob_name, permission=None, 
-                        expiry=None, start=None, id=None, ip=None, protocol=None,
-                        cache_control=None, content_disposition=None,
-                        content_encoding=None, content_language=None,
-                        content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared
-            access signature. The default value is https,http. See
-            :class:`~azure.storage.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = container_name + '/' + blob_name
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_resource('b')
-        sas.add_override_response_headers(cache_control, content_disposition, 
-                                          content_encoding, content_language, 
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
-
-        return sas.get_token()
-
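The response-header override parameters above map onto the rscc/rscd/rsce/rscl/rsct query fields listed further down. A hedged sketch of forcing a download filename, reusing the sas object and imports from the earlier sketch (the account URL is illustrative):

    token = sas.generate_blob(
        'mycontainer', 'report.pdf',
        permission='r',
        expiry=datetime.utcnow() + timedelta(minutes=30),
        content_disposition='attachment; filename=report.pdf',  # emitted as rscd
        content_type='application/pdf',                         # emitted as rsct
    )
    url = 'https://myaccount.blob.core.windows.net/mycontainer/report.pdf?' + token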
-    def generate_container(self, container_name, permission=None, expiry=None, 
-                        start=None, id=None, ip=None, protocol=None,
-                        cache_control=None, content_disposition=None,
-                        content_encoding=None, content_language=None,
-                        content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared
-            access signature. The default value is https,http. See
-            :class:`~azure.storage.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_resource('c')
-        sas.add_override_response_headers(cache_control, content_disposition, 
-                                          content_encoding, content_language, 
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
-
-        return sas.get_token()
-
-    def generate_file(self, share_name, directory_name=None, file_name=None, 
-                      permission=None, expiry=None, start=None, id=None,
-                      ip=None, protocol=None, cache_control=None,
-                      content_disposition=None, content_encoding=None, 
-                      content_language=None, content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so 
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared
-            access signature. The default value is https,http. See
-            :class:`~azure.storage.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = share_name
-        if directory_name is not None:
-            resource_path += '/' + _to_str(directory_name)
-        resource_path += '/' + _to_str(file_name)
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_resource('f')
-        sas.add_override_response_headers(cache_control, content_disposition, 
-                                          content_encoding, content_language, 
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
-
-        return sas.get_token()
-
-    def generate_share(self, share_name, permission=None, expiry=None, 
-                       start=None, id=None, ip=None, protocol=None, 
-                       cache_control=None, content_disposition=None, 
-                       content_encoding=None, content_language=None, 
-                       content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared
-            access signature. The default value is https,http. See
-            :class:`~azure.storage.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_id(id)
-        sas.add_resource('s')
-        sas.add_override_response_headers(cache_control, content_disposition, 
-                                          content_encoding, content_language, 
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
-
-        return sas.get_token()
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None, 
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required; account SAS tokens cannot reference a stored access
-            policy. You can combine values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required, since account SAS tokens cannot reference a stored access
-            policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared
-            access signature. The default value is https,http. See
-            :class:`~azure.storage.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
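Continuing the sketch for the account-level generator (Services, ResourceTypes, and AccountPermissions are the models the docstring names; the keyword arguments used to build them here are assumptions):

    token = sas.generate_account(
        services=Services(table=True),
        resource_types=ResourceTypes(object=True),
        permission=AccountPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # token is a plain query string, e.g. 'ss=t&srt=o&sp=r&se=...&sv=...&sig=...'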
-class _QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    TABLE_NAME = 'tn'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-
-class _SharedAccessHelper():
-
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _to_str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(_QueryStringConstants.SIGNED_START, start)
-        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION)
-
-    def add_resource(self, resource):
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, id):
-        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_table_access_ranges(self, table_name, start_pk, start_rk, 
-                                    end_pk, end_rk):
-        self._add_query(_QueryStringConstants.TABLE_NAME, table_name)
-        self._add_query(_QueryStringConstants.START_PK, start_pk)
-        self._add_query(_QueryStringConstants.START_RK, start_rk)
-        self._add_query(_QueryStringConstants.END_PK, end_pk)
-        self._add_query(_QueryStringConstants.END_RK, end_rk)
-
-    def add_override_response_headers(self, cache_control,
-                                        content_disposition,
-                                        content_encoding,
-                                        content_language,
-                                        content_type):
-        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_resource_signature(self, account_name, account_key, service, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-                get_value_to_append(_QueryStringConstants.SIGNED_START) +
-                get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-                canonicalized_resource +
-                get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-                get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-                get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-                get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        if service == 'blob' or service == 'file':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
-                get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-                get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-                get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-                get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        if service == 'table':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.START_PK) +
-                get_value_to_append(_QueryStringConstants.START_RK) +
-                get_value_to_append(_QueryStringConstants.END_PK) +
-                get_value_to_append(_QueryStringConstants.END_RK))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, 
-                        _sign_string(account_key, string_to_sign))
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-                get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-                get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
-                get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-                get_value_to_append(_QueryStringConstants.SIGNED_START) +
-                get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-                get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-                get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-                get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, 
-                        _sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
\ No newline at end of file
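add_resource_signature and add_account_signature both defer to _sign_string, which is defined elsewhere in this package. A hedged reconstruction of what that helper computes in storage SDKs of this era (an assumption, not code taken from the removed module):

    import base64
    import hashlib
    import hmac

    def sign_string(account_key, string_to_sign):
        # HMAC-SHA256 over the UTF-8 string-to-sign, keyed with the
        # base64-decoded account key; the digest is base64-encoded.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')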
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/storageclient.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/storageclient.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/storageclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/storageclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,139 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import os
-import sys
-import copy
-import requests
-
-from abc import ABCMeta
-from azure.common import (
-    AzureException,
-)
-from ._constants import (
-    _USER_AGENT_STRING,
-    _SOCKET_TIMEOUT
-)
-from ._http import HTTPError
-from ._http.httpclient import _HTTPClient
-from ._serialization import (
-    _storage_error_handler,
-    _update_request,
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-class StorageClient(object):
-
-    '''
-    This is the base class for service objects. Service objects are used to
-    perform all requests to Storage. This class cannot be instantiated directly.
-    '''
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, connection_params):
-        '''
-        :param obj connection_params: The parameters to use to construct the client.
-        '''
-        self.account_name = connection_params.account_name
-        self.account_key = connection_params.account_key
-        self.sas_token = connection_params.sas_token
-
-        self.protocol = connection_params.protocol
-        self.primary_endpoint = connection_params.primary_endpoint
-        self.secondary_endpoint = connection_params.secondary_endpoint
-
-        self.request_session = connection_params.request_session
-
-        self._httpclient = _HTTPClient(
-            service_instance=self,
-            protocol=self.protocol,
-            request_session=connection_params.request_session or requests.Session(),
-            user_agent=_USER_AGENT_STRING,
-            timeout=_SOCKET_TIMEOUT,
-        )
-        self._filter = self._perform_request_worker
-
-    def with_filter(self, filter):
-        '''
-        Returns a new service which will process requests with the specified
-        filter. Filtering operations can include logging, automatic retrying,
-        etc... The filter is a lambda which receives the HTTPRequest and
-        another lambda. The filter can perform any pre-processing on the
-        request, pass it off to the next lambda, and then perform any
-        post-processing on the response.
-
-        :param function(request) filter: A filter function.
-        :return: A new service using the specified filter.
-        :rtype: a subclass of :class:`StorageClient`
-        '''
-        res = copy.deepcopy(self)
-        old_filter = self._filter
-
-        def new_filter(request):
-            return filter(request, old_filter)
-
-        res._filter = new_filter
-        return res
-
-    def set_proxy(self, host, port, user=None, password=None):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        :param str host: Address of the proxy. Ex: '192.168.0.100'
-        :param int port: Port of the proxy. Ex: 6000
-        :param str user: User for proxy authorization.
-        :param str password: Password for proxy authorization.
-        '''
-        self._httpclient.set_proxy(host, port, user, password)
-
-    def _get_host(self):
-        return self.primary_endpoint
-
-    def _perform_request_worker(self, request):
-        _update_request(request)
-        self.authentication.sign_request(request)
-        return self._httpclient.perform_request(request)
-
-    def _perform_request(self, request, encoding='utf-8'):
-        '''
-        Sends the request and returns the response. Catches HTTPError and hands
-        it to the error handler.
-        '''
-        try:
-            resp = self._filter(request)
-
-            if sys.version_info >= (3,) and isinstance(resp, bytes) and \
-                encoding:
-                resp = resp.decode(encoding)
-
-        # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
-        except HTTPError as ex:
-            _storage_error_handler(ex)
-
-        # Wrap all other exceptions as AzureExceptions to ease exception handling code
-        except Exception as ex:
-            if sys.version_info >= (3,):
-                # Automatic chaining in Python 3 means we keep the trace
-                raise AzureException
-            else:
-                # There isn't a good general way in Python 2 to keep the stack
-                # trace that would not be a syntax error in Python 3.
-                # However, we can keep the previous error type and message
-                # TODO: In the future we will log the trace
-                raise AzureException('{}: {}'.format(ex.__class__.__name__, ex.args[0]))
-
-        return resp
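The with_filter docstring above describes a chain of filters, each receiving the request plus the next filter in line. A minimal sketch of one (the table_service variable is illustrative):

    def logging_filter(request, next_filter):
        # Pre-process, delegate to the rest of the chain, then post-process.
        print('>>', request.method, request.path)
        response = next_filter(request)
        print('<<', type(response).__name__)
        return response

    logged_service = table_service.with_filter(logging_filter)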
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/__init__.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .models import (
-    Entity,
-    EntityProperty,
-    Table,
-    TablePermissions,
-    TablePayloadFormat,
-    EdmType,
-    AzureBatchOperationError,
-    AzureBatchValidationError,
-)
-from .tablebatch import TableBatch
-from .tableservice import TableService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,291 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-
-from dateutil import parser
-if sys.version_info < (3,):
-    from urllib2 import quote as url_quote
-else:
-    from urllib.parse import quote as url_quote
-from json import (
-    loads,
-)
-from .._http import HTTPResponse
-from azure.common import (
-    AzureException,
-)
-from .._common_conversion import (
-    _decode_base64_to_bytes,
-)
-from ._error import (
-    _ERROR_TYPE_NOT_SUPPORTED,
-    _ERROR_INVALID_PROPERTY_RESOLVER,
-)
-from .models import (
-    Entity,
-    EntityProperty,
-    Table,
-    EdmType,
-    AzureBatchOperationError,
-)
-from ..models import (
-    _list,
-    _HeaderDict,
-)
-
-def _get_continuation_from_response_headers(response):
-    marker = {}
-    for name, value in response.headers:
-        if name.startswith('x-ms-continuation'):
-            marker[name[len('x-ms-continuation') + 1:]] = value
-    return marker
-
-# Tables of conversions to and from entity types.  We support specific
-# datatypes, and beyond that the user can use an EntityProperty to get
-# custom data type support.
-
-def _from_entity_binary(value):
-    return EntityProperty(EdmType.BINARY, _decode_base64_to_bytes(value))
-
-
-def _from_entity_int32(value):
-    return EntityProperty(EdmType.INT32, int(value))
-
-
-def _from_entity_datetime(value):
-    # Note that Azure always returns UTC datetime, and dateutil parser
-    # will set the tzinfo on the date it returns
-    return parser.parse(value)
-
-
-_EDM_TYPES = [EdmType.BINARY, EdmType.INT64, EdmType.GUID, EdmType.DATETIME,
-              EdmType.STRING, EdmType.INT32, EdmType.DOUBLE, EdmType.BOOLEAN]
-
-
-_ENTITY_TO_PYTHON_CONVERSIONS = {
-    EdmType.BINARY: _from_entity_binary,
-    EdmType.INT32: _from_entity_int32,
-    EdmType.INT64: int,
-    EdmType.DOUBLE: float,
-    EdmType.DATETIME: _from_entity_datetime,
-}
-
-
-def _convert_json_response_to_entity(response, property_resolver):
-    if response is None:
-        return response
-
-    root = loads(response.body.decode('utf-8'))
-
-    return _convert_json_to_entity(root, property_resolver)
-
-
-def _convert_json_to_entity(entry_element, property_resolver):
-    ''' Convert json response to entity.
-
-    The entity format is:
-    {
-       "Address":"Mountain View",
-       "Age":23,
-       "AmountDue":200.23,
-       "CustomerCode@odata.type":"Edm.Guid",
-       "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
-       "CustomerSince@odata.type":"Edm.DateTime",
-       "CustomerSince":"2008-07-10T00:00:00",
-       "IsActive":true,
-       "NumberOfOrders@odata.type":"Edm.Int64",
-       "NumberOfOrders":"255",
-       "PartitionKey":"mypartitionkey",
-       "RowKey":"myrowkey"
-    }
-    '''
-    entity = Entity()
-
-    properties = {}
-    edmtypes = {}
-    odata = {}
-
-    for name, value in entry_element.items():
-        if name.startswith('odata.'):
-            odata[name[6:]] = value
-        elif name.endswith('@odata.type'):
-            edmtypes[name[:-11]] = value
-        else:
-            properties[name] = value
-
-    # Partition key is a known property
-    partition_key = properties.pop('PartitionKey', None)
-    if partition_key:
-        entity['PartitionKey'] = partition_key
-
-    # Row key is a known property
-    row_key = properties.pop('RowKey', None)
-    if row_key:
-        entity['RowKey'] = row_key
-
-    # Timestamp is a known property
-    timestamp = properties.pop('Timestamp', None)
-    if timestamp:
-        entity['Timestamp'] = _from_entity_datetime(timestamp)
-        
-    for name, value in properties.items():
-        mtype = edmtypes.get(name)
-
-        # use the property resolver if present
-        if property_resolver:
-            mtype = property_resolver(partition_key, row_key, 
-                                      name, value, mtype)
-
-            # throw if the type returned is not a valid edm type
-            if mtype and mtype not in _EDM_TYPES:
-                raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
-
-        # Add type for Int32
-        if type(value) is int:
-            mtype = EdmType.INT32
-
-        # no type info, property should parse automatically
-        if not mtype: 
-            entity[name] = value
-        else:  # need an object to hold the property
-            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
-            if conv is not None:
-                try:
-                    new_property = conv(value)
-                except Exception as e:
-                    # throw if the type returned by the property resolver
-                    # cannot be used in the conversion
-                    if property_resolver:
-                        raise AzureException(
-                            _ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
-                    else:
-                        raise e
-            else:
-                new_property = EntityProperty(mtype, value)
-            entity[name] = new_property
-
-    # extract etag from entry
-    etag = odata.get('etag')
-    if timestamp:
-         etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
-    entity['etag'] = etag
-
-    return entity
-
-
-def _convert_json_response_to_tables(response):
-    ''' Converts the response to a list of tables.
-    '''
-    if response is None:
-        return response
-
-    tables = _list()
-
-    continuation = _get_continuation_from_response_headers(response)
-    tables.next_marker = continuation.get('NextTableName')
-
-    root = loads(response.body.decode('utf-8'))
-
-    if 'TableName' in root:
-        table = Table()
-        table.name = root['TableName']
-        tables.append(table)
-    else:
-        for element in root['value']:
-            table = Table()
-            table.name = element['TableName']
-            tables.append(table)
-
-    return tables
-
-
-def _convert_json_response_to_entities(response, property_resolver):
-    ''' Converts the response to a list of entities.
-    '''
-    if response is None:
-        return response
-
-    entities = _list()
-
-    entities.next_marker = _get_continuation_from_response_headers(response)
-
-    root = loads(response.body.decode('utf-8'))
-
-    if 'value' in root:
-        for entity in root['value']:
-            entities.append(_convert_json_to_entity(entity,
-                                                    property_resolver))
-    else:
-        entities.append(_convert_json_to_entity(root,
-                                                property_resolver))
-
-    return entities
-
-def _extract_etag(response):
-    ''' Extracts the etag from the response headers. '''
-    if response and response.headers:
-        for name, value in response.headers:
-            if name.lower() == 'etag':
-                return value
-
-    return None
-
-def _parse_batch_response(body):
-    parts = body.split(b'--changesetresponse_')
-
-    responses = []
-    for part in parts:
-        httpLocation = part.find(b'HTTP/')
-        if httpLocation > 0:
-            response = _parse_batch_response_part(part[httpLocation:])
-            if response.status >= 300:
-                _parse_batch_error(response)
-            responses.append(_extract_etag(response))
-
-    return responses
-
-def _parse_batch_response_part(part):
-    lines = part.splitlines()
-
-    # First line is the HTTP status/reason
-    status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
-
-    # Followed by headers and body
-    headers = []
-    body = b''
-    isBody = False
-    for line in lines[1:]:
-        if line == b'' and not isBody:
-            isBody = True
-        elif isBody:
-            body += line
-        else:
-            headerName, _, headerVal = line.partition(b': ')
-            headers.append((headerName.lower().decode("utf-8"), headerVal.decode("utf-8")))
-
-    return HTTPResponse(int(status), reason.strip(), headers, body)
-
-def _parse_batch_error(response):
-    doc = loads(response.body.decode('utf-8'))
-
-    code = ''
-    message = ''
-    error = doc.get('odata.error')
-    if error:
-        code = error.get('code')
-        if error.get('message'):
-            message = error.get('message').get('value')
-
-    raise AzureBatchOperationError(message, response.status, code)
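_convert_json_to_entity invokes the optional property_resolver with (partition_key, row_key, name, value, edm_type) and expects an EdmType back. A minimal sketch against the module removed above:

    from azure.multiapi.storage.v2015_04_05.table.models import EdmType

    def my_resolver(partition_key, row_key, name, value, edm_type):
        # Force a property the service returned untyped to deserialize as INT64.
        if name == 'NumberOfOrders':
            return EdmType.INT64
        return edm_type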
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_error.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-from .._error import (
-    _validate_not_none,
-    _ERROR_VALUE_NONE_OR_EMPTY,
-)
-
-_ERROR_ATTRIBUTE_MISSING = '\'{0}\' object has no attribute \'{1}\''
-_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
-_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
-_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
-_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
-    'Cannot serialize the specified value ({0}) to an entity.  Please use ' + \
-    'an EntityProperty (which can specify custom types), int, str, bool, ' + \
-    'or datetime.'
-_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
-    'Row keys should not be the same in a batch operation'
-_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
-    'Partition key should be the same in a batch operation'
-_ERROR_INVALID_ENTITY_TYPE = 'The entity must be either in dict format or an entity object.'
-_ERROR_INVALID_PROPERTY_RESOLVER = \
-    'The specified property resolver returned an invalid type. Name: {0}, Value: {1}, ' + \
-    'EdmType: {2}'
-_ERROR_PROPERTY_NAME_TOO_LONG = 'The property name exceeds the maximum allowed length.'
-_ERROR_TOO_MANY_ENTITIES_IN_BATCH = \
-    'Batches may only contain 100 operations'
-_ERROR_TOO_MANY_PROPERTIES = 'The entity contains more properties than allowed.'
-_ERROR_TYPE_NOT_SUPPORTED = 'Type not supported when sending data to the service: {0}.'
-_ERROR_VALUE_TOO_LARGE = '{0} is too large to be cast to type {1}.'
-
-
-def _validate_object_has_param(param_name, object):
-    if not object.get(param_name):
-        raise ValueError(_ERROR_VALUE_NONE_OR_EMPTY.format(param_name))
-
-def _validate_entity(entity):
-    # Validate entity exists
-    _validate_not_none('entity', entity)
-
-    # Entity inherits from dict, so just validating dict is fine
-    if not isinstance(entity, dict):
-        raise TypeError(_ERROR_INVALID_ENTITY_TYPE)
-
-    # Validate partition key and row key are present
-    _validate_object_has_param('PartitionKey', entity)
-    _validate_object_has_param('RowKey', entity)
-
-    # Validate there are not more than 255 properties including Timestamp
-    if (len(entity) > 255) or (len(entity) > 254 and 'Timestamp' not in entity):
-        raise ValueError(_ERROR_TOO_MANY_PROPERTIES)
-
-    # Validate the property names are not too long
-    for propname in entity:
-        if len(propname) > 255:
-            raise ValueError(_ERROR_PROPERTY_NAME_TOO_LONG)
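The checks in _validate_entity above amount to: a dict-like entity carrying PartitionKey and RowKey, at most 255 properties including Timestamp, and property names no longer than 255 characters. The smallest value that passes:

    entity = {'PartitionKey': 'mypartitionkey', 'RowKey': 'myrowkey'}
    _validate_entity(entity)  # returns None; raises ValueError/TypeError otherwise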
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_request.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_request.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_request.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_request.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from .._http import HTTPRequest
-from .._common_conversion import (
-    _to_str,
-)
-from .._error import (
-    _validate_not_none,
-)
-from .._serialization import (
-    _get_request_body,
-)
-from ._error import (
-    _validate_entity,
-)
-from ._serialization import (
-    _convert_entity_to_json,
-    _DEFAULT_ACCEPT_HEADER,
-    _DEFAULT_CONTENT_TYPE_HEADER,
-    _DEFAULT_PREFER_HEADER,
-)
-
-def _get_entity(partition_key, row_key, select, accept):
-    '''
-    Constructs a get entity request.
-    '''
-    _validate_not_none('partition_key', partition_key)
-    _validate_not_none('row_key', row_key)
-    _validate_not_none('accept', accept)
-    request = HTTPRequest()
-    request.method = 'GET'
-    request.headers = [('Accept', _to_str(accept))]
-    request.query = [('$select', _to_str(select))]
-
-    return request
-
-def _insert_entity(entity):
-    '''
-    Constructs an insert entity request.
-    '''
-    _validate_entity(entity)
-
-    request = HTTPRequest()
-    request.method = 'POST'
-    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                        _DEFAULT_PREFER_HEADER,
-                        _DEFAULT_ACCEPT_HEADER]
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-def _update_entity(entity, if_match):
-    '''
-    Constructs an update entity request.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_entity(entity)
-
-    request = HTTPRequest()
-    request.method = 'PUT'
-    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                        _DEFAULT_ACCEPT_HEADER,
-                        ('If-Match', _to_str(if_match)),]
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-def _merge_entity(entity, if_match):
-    '''
-    Constructs a merge entity request.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_entity(entity)
-
-    request = HTTPRequest()
-    request.method = 'MERGE'
-    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                        _DEFAULT_ACCEPT_HEADER,
-                        ('If-Match', _to_str(if_match))]
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-def _delete_entity(partition_key, row_key, if_match):
-    '''
-    Constructs a delete entity request.
-    '''
-    _validate_not_none('if_match', if_match)
-    _validate_not_none('partition_key', partition_key)
-    _validate_not_none('row_key', row_key)
-    request = HTTPRequest()
-    request.method = 'DELETE'
-    request.headers = [_DEFAULT_ACCEPT_HEADER,
-                        ('If-Match', _to_str(if_match))]
-
-    return request
-
-def _insert_or_replace_entity(entity):
-    '''
-    Constructs an insert or replace entity request.
-    '''
-    _validate_entity(entity)
-
-    request = HTTPRequest()
-    request.method = 'PUT'
-    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                        _DEFAULT_ACCEPT_HEADER]
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
-
-def _insert_or_merge_entity(entity):
-    '''
-    Constructs an insert or merge entity request.
-    '''
-    _validate_entity(entity)
-
-    request = HTTPRequest()
-    request.method = 'MERGE'
-    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                        _DEFAULT_ACCEPT_HEADER]
-    request.body = _get_request_body(_convert_entity_to_json(entity))
-
-    return request
\ No newline at end of file
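A short sketch of what these constructors produce; the host and path of the returned HTTPRequest are filled in later by the calling table service, not here:

    entity = {'PartitionKey': 'mypartitionkey', 'RowKey': 'myrowkey', 'Age': 23}
    request = _insert_entity(entity)
    # request.method == 'POST'; request.headers carry the JSON Content-Type,
    # the 'Prefer: return-no-content' header, and the default Accept header;
    # request.body is the JSON-serialized entity.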
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_serialization.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,255 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-import sys
-import types
-import uuid
-
-from datetime import datetime
-from json import (
-    dumps,
-)
-from math import(
-    isnan,
-)
-from .._common_conversion import (
-    _encode_base64,
-    _to_str,
-)
-from .._serialization import (
-    _to_utc_datetime,
-)
-from ._error import (
-    _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
-    _ERROR_TYPE_NOT_SUPPORTED,
-    _ERROR_VALUE_TOO_LARGE,
-)
-from .models import (
-    EntityProperty,
-    TablePayloadFormat,
-    EdmType,
-)
-
-if sys.version_info < (3,):
-    def _new_boundary():
-        return str(uuid.uuid1())
-else:
-    def _new_boundary():
-        return str(uuid.uuid1()).encode('utf-8')
-
-_DEFAULT_ACCEPT_HEADER = ('Accept', TablePayloadFormat.JSON_MINIMAL_METADATA)
-_DEFAULT_CONTENT_TYPE_HEADER = ('Content-Type', 'application/json')
-_DEFAULT_PREFER_HEADER = ('Prefer', 'return-no-content')
-_SUB_HEADERS = ['If-Match', 'Prefer', 'Accept', 'Content-Type', 'DataServiceVersion']
-
-def _get_entity_path(table_name, partition_key, row_key):
-    return '/{0}(PartitionKey=\'{1}\',RowKey=\'{2}\')'.format(
-            _to_str(table_name), 
-            _to_str(partition_key), 
-            _to_str(row_key))
-
-def _update_storage_table_header(request):
-    ''' Adds additional headers for storage table requests. '''
-
-    # set service version
-    request.headers.append(('DataServiceVersion', '3.0;NetFx'))
-    request.headers.append(('MaxDataServiceVersion', '3.0'))
-
-def _to_entity_binary(value):
-    return EdmType.BINARY, _encode_base64(value)
-
-def _to_entity_bool(value):
-    return None, value
-
-def _to_entity_datetime(value):
-    return EdmType.DATETIME, _to_utc_datetime(value)
-
-def _to_entity_float(value):
-    if isnan(value):
-        return EdmType.DOUBLE, 'NaN'
-    if value == float('inf'):
-        return EdmType.DOUBLE, 'Infinity'
-    if value == float('-inf'):
-        return EdmType.DOUBLE, '-Infinity'
-    return None, value
-
-def _to_entity_guid(value):
-    return EdmType.GUID, str(value)
-
-def _to_entity_int32(value):
-    if sys.version_info < (3,):
-        value = long(value)
-    else:
-        value = int(value)
-    if value >= 2**31 or value < -(2**31):
-        raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT32))
-    return None, value
-
-def _to_entity_int64(value):
-    if sys.version_info < (3,):
-        ivalue = long(value)
-    else:
-        ivalue = int(value)
-    if ivalue >= 2**63 or ivalue < -(2**63):
-        raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT64))
-    return EdmType.INT64, str(value)
-
-def _to_entity_str(value):
-    return None, value
-
-def _to_entity_none(value):
-    return None, None
-
-# Conversion from Python type to a function which returns a tuple of the
-# type string and content string.
-_PYTHON_TO_ENTITY_CONVERSIONS = {
-    int: _to_entity_int64,
-    bool: _to_entity_bool,
-    datetime: _to_entity_datetime,
-    float: _to_entity_float,
-    str: _to_entity_str,
-}
-
-# Conversion from Edm type to a function which returns a tuple of the
-# type string and content string.
-_EDM_TO_ENTITY_CONVERSIONS = {
-    EdmType.BINARY: _to_entity_binary,
-    EdmType.BOOLEAN: _to_entity_bool,
-    EdmType.DATETIME: _to_entity_datetime,
-    EdmType.DOUBLE: _to_entity_float,
-    EdmType.GUID: _to_entity_guid,
-    EdmType.INT32: _to_entity_int32,
-    EdmType.INT64: _to_entity_int64,
-    EdmType.STRING: _to_entity_str,
-}
-
-if sys.version_info < (3,):
-    _PYTHON_TO_ENTITY_CONVERSIONS.update({
-        long: _to_entity_int64,
-        types.NoneType: _to_entity_none,
-        unicode: _to_entity_str,
-    })
-
-
-def _convert_entity_to_json(source):
-    ''' Converts an entity object to json to send.
-    The entity format is:
-    {
-       "Address":"Mountain View",
-       "Age":23,
-       "AmountDue":200.23,
-       "CustomerCode@odata.type":"Edm.Guid",
-       "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
-       "CustomerSince@odata.type":"Edm.DateTime",
-       "CustomerSince":"2008-07-10T00:00:00",
-       "IsActive":true,
-       "NumberOfOrders@odata.type":"Edm.Int64",
-       "NumberOfOrders":"255",
-       "PartitionKey":"mypartitionkey",
-       "RowKey":"myrowkey"
-    }
-    '''
-
-    properties = {}
-
-    # Set the property type for known Python types when the value carries no
-    # type info. If the value has explicit type info, use value.type instead.
-    for name, value in source.items():
-        mtype = ''
-
-        if isinstance(value, EntityProperty):
-            conv = _EDM_TO_ENTITY_CONVERSIONS.get(value.type)
-            if conv is None:
-                raise TypeError(
-                    _ERROR_TYPE_NOT_SUPPORTED.format(value.type))
-            mtype, value = conv(value.value)
-        else:
-            conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
-            if conv is None and sys.version_info >= (3,) and value is None:
-                conv = _to_entity_none
-            if conv is None:
-                raise TypeError(
-                    _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
-                        type(value).__name__))
-            mtype, value = conv(value)
-
-        # form the property node
-        properties[name] = value
-        if mtype:
-            properties[name + '@odata.type'] = mtype
-
-    # generate the entity_body
-    return dumps(properties)
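As a quick illustration of the mapping above, here is a minimal sketch using the
module's own helpers (the entity values are hypothetical):

    # int maps through _to_entity_int64, so Age is sent as a string with an
    # Edm.Int64 annotation; bool needs no annotation; a GUID must be typed
    # explicitly via EntityProperty.
    entity = {
        'PartitionKey': 'mypartitionkey',
        'RowKey': 'myrowkey',
        'Age': 23,
        'IsActive': True,
        'CustomerCode': EntityProperty(EdmType.GUID,
                                       'c9da6455-213d-42c9-9a79-3e9149a57833'),
    }
    body = _convert_entity_to_json(entity)
    # body now contains "Age": "23" alongside "Age@odata.type": "Edm.Int64",
    # matching the format shown in the docstring above.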
-
-
-def _convert_table_to_json(table_name):
-    '''
-    Create json to send for a given table name. The json format for a table is
-    the same as for an entity, except that a table has only the single
-    property 'TableName', so we simply delegate to _convert_entity_to_json.
-
-    table_name:
-        the name of the table
-    '''
-    return _convert_entity_to_json({'TableName': table_name})
-
-def _convert_batch_to_json(batch_requests):
-    '''
-    Create json to send for an array of batch requests.
-
-    batch_requests:
-        an array of requests
-    '''
-    batch_boundary = b'batch_' + _new_boundary()
-    changeset_boundary = b'changeset_' + _new_boundary()
-
-    body = []
-    body.append(b'--' + batch_boundary + b'\n')
-    body.append(b'Content-Type: multipart/mixed; boundary=')
-    body.append(changeset_boundary + b'\n\n')
-
-    content_id = 1
-
-    # Adds each request body to the POST data.
-    for _, request in batch_requests:
-        body.append(b'--' + changeset_boundary + b'\n')
-        body.append(b'Content-Type: application/http\n')
-        body.append(b'Content-Transfer-Encoding: binary\n\n')
-        body.append(request.method.encode('utf-8'))
-        body.append(b' http://')
-        body.append(request.host.encode('utf-8'))
-        body.append(request.path.encode('utf-8'))
-        body.append(b' HTTP/1.1\n')
-        body.append(b'Content-ID: ')
-        body.append(str(content_id).encode('utf-8') + b'\n')
-        content_id += 1
-
-        for name, value in request.headers:
-            if name in _SUB_HEADERS:
-                body.append(name.encode('utf-8') + b': ')
-                body.append(value.encode('utf-8') + b'\n')
-
-        # Add different headers for different request types.
-        if request.method != 'DELETE':
-            body.append(b'Content-Length: ')
-            body.append(str(len(request.body)).encode('utf-8'))
-            body.append(b'\n\n')
-            body.append(request.body + b'\n')
-
-        body.append(b'\n')
-
-    body.append(b'--' + changeset_boundary + b'--' + b'\n')
-    body.append(b'--' + batch_boundary + b'--')
-
-    return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8')
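For reference, a rough sketch of the multipart body this function assembles
(boundaries abbreviated; this is illustrative, not captured wire output):

    # --batch_<uuid>
    # Content-Type: multipart/mixed; boundary=changeset_<uuid>
    #
    # --changeset_<uuid>
    # Content-Type: application/http
    # Content-Transfer-Encoding: binary
    #
    # POST http://<account>.table.core.windows.net/mytable HTTP/1.1
    # Content-ID: 1
    # Accept: application/json;odata=minimalmetadata
    # Content-Type: application/json
    # Content-Length: <n>
    #
    # {"PartitionKey": "pk", "RowKey": "rk1"}
    #
    # --changeset_<uuid>--
    # --batch_<uuid>--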
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/models.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/models.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,204 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from azure.common import (
-    AzureException,
-    AzureHttpError,
-)
-from ._error import (
-    _ERROR_ATTRIBUTE_MISSING,
-)
-
-class AzureBatchValidationError(AzureException):
-    '''
-    Indicates that a batch operation cannot proceed due to invalid input.
-
-    :ivar str message: 
-        A detailed error message indicating the reason for the failure. 
-    '''
-
-class AzureBatchOperationError(AzureHttpError):
-
-    '''
-    Indicates that a batch operation failed.
-    
-    :ivar str message: 
-        A detailed error message indicating the index of the batch 
-        request which failed and the reason for the failure. For example, 
-        '0:One of the request inputs is out of range.' indicates the 0th batch 
-        request failed as one of its property values was out of range.
-    :ivar int status_code: 
-        The HTTP status code of the batch request. For example, 400.
-    :ivar str code:
-        The batch status code. For example, 'OutOfRangeInput'.
-    '''
-
-    def __init__(self, message, status_code, batch_code):
-        super(AzureBatchOperationError, self).__init__(message, status_code)
-        self.code = batch_code
-
-class Entity(dict):
-    '''
-    An entity object. Can be accessed as a dict or as an object. The attributes of 
-    the entity will be created dynamically. For example, the following are both 
-    valid::
-        entity = Entity()
-        entity.a = 'b'
-        entity['x'] = 'y'
-    '''
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
-
-    __setattr__ = dict.__setitem__
-
-    def __delattr__(self, name):
-        try:
-            del self[name]
-        except KeyError:
-            raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
-
-    def __dir__(self):
-        return dir({}) + list(self.keys())
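A short usage sketch of the dict/attribute duality described above (values
hypothetical):

    entity = Entity()
    entity.PartitionKey = 'pk'   # attribute assignment writes a dict key
    entity['RowKey'] = 'rk'      # plain dict access is equivalent
    assert entity.RowKey == entity['RowKey']
    # Accessing a missing name raises AttributeError rather than KeyError:
    # entity.missing  ->  AttributeError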
-
-
-class EntityProperty(object):
-    '''
-    An entity property. Used to explicitly set :class:`~EdmType` when necessary. 
-    
-    Values which require explicit typing are GUID, INT32, and BINARY. Other EdmTypes
-    may be explicitly created as EntityProperty objects but need not be. For example,
-    the below will both create STRING typed properties on the entity::
-        entity = Entity()
-        entity.a = 'b'
-        entity.x = EntityProperty(EdmType.STRING, 'y')
-    '''
-
-    def __init__(self, type=None, value=None):
-        '''
-        Represents a typed entity property.
-
-        :param str type: The EdmType of the property.
-        :param value: The value of the property.
-        '''
-        self.type = type
-        self.value = value
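A minimal sketch of explicit typing with EntityProperty; per the docstring
above, GUID, INT32 and BINARY require it (values hypothetical):

    entity = Entity()
    entity.PartitionKey = 'pk'
    entity.RowKey = 'rk'
    entity.customer_code = EntityProperty(EdmType.GUID,
                                          'c9da6455-213d-42c9-9a79-3e9149a57833')
    entity.small_count = EntityProperty(EdmType.INT32, 42)  # else stored as Edm.Int64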
-
-
-class Table(object):
-    '''
-    Represents an Azure Table. Returned by list_tables.
-
-    :ivar str name: The name of the table.
-    '''
-    pass
-
-
-class TablePayloadFormat(object):
-    '''
-    Specifies the accepted content type of the response payload. More information
-    can be found here: https://msdn.microsoft.com/en-us/library/azure/dn535600.aspx
-    '''
-   
-    JSON_NO_METADATA = 'application/json;odata=nometadata'
-    '''Returns no type information for the entity properties.'''
-    
-    JSON_MINIMAL_METADATA = 'application/json;odata=minimalmetadata'
-    '''Returns minimal type information for the entity properties.'''
-    
-    JSON_FULL_METADATA = 'application/json;odata=fullmetadata'
-    '''Returns full type information for the entity properties plus some extra odata properties.'''
-
-
-class EdmType(object):
-    '''
-    Used by :class:`~.EntityProperty` to represent the type of the entity property 
-    to be stored by the Table service.
-    '''
-
-    BINARY = 'Edm.Binary'
-    ''' Represents byte data. Must be specified. '''
-
-    INT64 = 'Edm.Int64'
-    ''' Represents a number between -(2^63) and 2^63 - 1. This is the default type for Python numbers. '''
-
-    GUID = 'Edm.Guid'
-    ''' Represents a GUID. Must be specified. '''
-
-    DATETIME = 'Edm.DateTime'
-    ''' Represents a date. This type will be inferred for Python datetime objects. '''
-
-    STRING = 'Edm.String'
-    ''' Represents a string. This type will be inferred for Python strings. '''
-
-    INT32 = 'Edm.Int32'
-    ''' Represents a number between -(2^31) and 2^31 - 1. Must be specified or numbers will default to INT64. '''
-
-    DOUBLE = 'Edm.Double'
-    ''' Represents a double. This type will be inferred for Python floating point numbers. '''
-
-    BOOLEAN = 'Edm.Boolean'
-    ''' Represents a boolean. This type will be inferred for Python bools. '''
-
-
-class TablePermissions(object):
-    '''
-    TablePermissions class to be used with the :func:`~azure.storage.table.tableservice.TableService.generate_table_shared_access_signature`
-    method and for the AccessPolicies used with :func:`~azure.storage.table.tableservice.TableService.set_table_acl`.
-
-    :ivar TablePermissions TablePermissions.QUERY: Get entities and query entities.
-    :ivar TablePermissions TablePermissions.ADD: Add entities.
-    :ivar TablePermissions TablePermissions.UPDATE: Update entities.
-    :ivar TablePermissions TablePermissions.DELETE: Delete entities.
-    '''
-
-    def __init__(self, query=False, add=False, update=False, delete=False, _str=None):
-        '''
-        :param bool query:
-            Get entities and query entities.
-        :param bool add:
-            Add entities. Add and Update permissions are required for upsert operations.
-        :param bool update:
-            Update entities. Add and Update permissions are required for upsert operations.
-        :param bool delete: 
-            Delete entities.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.query = query or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.delete = delete or ('d' in _str)
-    
-    def __or__(self, other):
-        return TablePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return TablePermissions(_str=str(self) + str(other))
-    
-    def __str__(self):
-        return (('r' if self.query else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('d' if self.delete else ''))
-
-TablePermissions.QUERY = TablePermissions(query=True)
-TablePermissions.ADD = TablePermissions(add=True)
-TablePermissions.UPDATE = TablePermissions(update=True)
-TablePermissions.DELETE = TablePermissions(delete=True)
\ No newline at end of file
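A brief sketch of how these compose; __or__ and __add__ simply concatenate the
underlying permission strings:

    perms = TablePermissions.QUERY | TablePermissions.ADD
    assert str(perms) == 'ra'
    # Equivalent explicit construction:
    assert str(TablePermissions(query=True, add=True)) == str(perms)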
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/tablebatch.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/tablebatch.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/tablebatch.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/tablebatch.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,198 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from ._error import (
-    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
-    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
-    _ERROR_TOO_MANY_ENTITIES_IN_BATCH,
-)
-from .models import (
-    AzureBatchValidationError,
-)
-from ._request import (
-    _insert_entity,
-    _update_entity,
-    _merge_entity,
-    _delete_entity,
-    _insert_or_replace_entity,
-    _insert_or_merge_entity,
-)
-
-class TableBatch(object):
-
-    '''
-    This is the class used for batch operations against the storage table service.
-
-    The Table service supports batch transactions on entities that are in the 
-    same table and belong to the same partition group. Multiple operations are 
-    supported within a single transaction. The batch can include at most 100 
-    entities, and its total payload may be no more than 4 MB in size.
-    '''
-
-    def __init__(self):
-        self._requests = []
-        self._partition_key = None
-        self._row_keys = []
-
-    def insert_entity(self, entity):
-        '''
-        Adds an insert entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_entity` for more 
-        information on inserts.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`azure.storage.table.models.Entity`
-        '''
-        request = _insert_entity(entity)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def update_entity(self, entity, if_match='*'):
-        '''
-        Adds an update entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.update_entity` for more 
-        information on updates.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to update. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The update operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional update, set If-Match to the wildcard character (*).
-        '''
-        request = _update_entity(entity, if_match)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def merge_entity(self, entity, if_match='*'):
-        '''
-        Adds a merge entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.merge_entity` for more 
-        information on merges.
-        
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The merge operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional merge, set If-Match to the wildcard character (*).
-        '''
-        request = _merge_entity(entity, if_match)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def delete_entity(self, partition_key, row_key,
-                      if_match='*'):
-        '''
-        Adds a delete entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.delete_entity` for more 
-        information on deletes.
-
-        The operation will not be executed until the batch is committed.
-
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The delete operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional delete, set If-Match to the wildcard character (*).
-        '''
-        request = _delete_entity(partition_key, row_key, if_match)
-        self._add_to_batch(partition_key, row_key, request)
-
-    def insert_or_replace_entity(self, entity):
-        '''
-        Adds an insert or replace entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more 
-        information on insert or replace operations.
-
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert or replace. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`azure.storage.table.models.Entity`
-        '''
-        request = _insert_or_replace_entity(entity)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def insert_or_merge_entity(self, entity):
-        '''
-        Adds an insert or merge entity operation to the batch. See 
-        :func:`~azure.storage.table.tableservice.TableService.insert_or_merge_entity` for more 
-        information on insert or merge operations.
-
-        The operation will not be executed until the batch is committed.
-
-        :param entity:
-            The entity to insert or merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`azure.storage.table.models.Entity`
-        '''
-        request = _insert_or_merge_entity(entity)
-        self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
-
-    def _add_to_batch(self, partition_key, row_key, request):
-        '''
-        Validates batch-specific rules.
-        
-        :param str partition_key:
-            PartitionKey of the entity.
-        :param str row_key:
-            RowKey of the entity.
-        :param request:
-            the request to insert, update or delete entity
-        '''
-        # All same partition keys
-        if self._partition_key:
-            if self._partition_key != partition_key:
-                raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
-        else:
-            self._partition_key = partition_key
-
-        # All different row keys
-        if row_key in self._row_keys:
-            raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
-        else:
-            self._row_keys.append(row_key)
-        
-        # 100 entities
-        if len(self._requests) >= 100:
-            raise AzureBatchValidationError(_ERROR_TOO_MANY_ENTITIES_IN_BATCH)
-
-        # Add the request to the batch
-        self._requests.append((row_key, request))
\ No newline at end of file
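A minimal sketch of the rules _add_to_batch enforces, using the names defined
in this module (entity values hypothetical):

    batch = TableBatch()
    batch.insert_entity({'PartitionKey': 'pk1', 'RowKey': 'rk1'})
    try:
        batch.insert_entity({'PartitionKey': 'pk2', 'RowKey': 'rk2'})
    except AzureBatchValidationError:
        pass  # every entity in a batch must share one PartitionKey
    try:
        batch.insert_entity({'PartitionKey': 'pk1', 'RowKey': 'rk1'})
    except AzureBatchValidationError:
        pass  # RowKeys must be unique within a batch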
diff -pruN 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/tableservice.py 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/tableservice.py
--- 1.4.0-1/azure/multiapi/storage/v2015_04_05/table/tableservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2015_04_05/table/tableservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1006 +0,0 @@
-﻿#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from contextlib import contextmanager
-from azure.common import (
-    AzureHttpError,
-)
-from .._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from .._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_STORAGE_MISSING_INFO,
-)
-from .._serialization import (
-    _get_request_body,
-    _update_request,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from .._http import HTTPRequest
-from ..models import (
-    Services,
-    ListGenerator,
-)
-from .models import TablePayloadFormat
-from .._auth import (
-    _StorageSASAuthentication,
-    _StorageTableSharedKeyAuthentication,
-)
-from .._connection import _ServiceParameters
-from .._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-)
-from ._serialization import (
-    _convert_table_to_json,
-    _convert_batch_to_json,
-    _update_storage_table_header,
-    _get_entity_path,
-    _DEFAULT_ACCEPT_HEADER,
-    _DEFAULT_CONTENT_TYPE_HEADER,
-    _DEFAULT_PREFER_HEADER,
-)
-from ._deserialization import (
-    _convert_json_response_to_entity,
-    _convert_json_response_to_tables,
-    _convert_json_response_to_entities,
-    _parse_batch_response,
-    _extract_etag,
-)
-from .._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ._request import (
-    _get_entity,
-    _insert_entity,
-    _update_entity,
-    _merge_entity,
-    _delete_entity,
-    _insert_or_replace_entity,
-    _insert_or_merge_entity,
-)
-from ..sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from ..storageclient import StorageClient
-from .tablebatch import TableBatch
-
-class TableService(StorageClient):
-
-    '''
-    This is the main class managing Azure Table resources.
-
-    The Azure Table service offers structured storage in the form of tables. Tables 
-    store data as collections of entities. Entities are similar to rows. An entity 
-    has a primary key and a set of properties. A property is a name, typed-value pair, 
-    similar to a column. The Table service does not enforce any schema for tables, 
-    so two entities in the same table may have different sets of properties. Developers 
-    may choose to enforce a schema on the client side. A table may contain any number 
-    of entities.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'table',
-            account_name=account_name, 
-            account_key=account_key, 
-            sas_token=sas_token, 
-            is_emulated=is_emulated, 
-            protocol=protocol, 
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string)
-            
-        super(TableService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageTableSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-
-    def generate_account_shared_access_signature(self, resource_types, permission, 
-                                        expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the table service.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with this SAS. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.TABLE, resource_types, permission, 
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-
-    def generate_table_shared_access_signature(self, table_name, permission=None, 
-                                        expiry=None, start=None, id=None,
-                                        ip=None, protocol=None,
-                                        start_pk=None, start_rk=None, 
-                                        end_pk=None, end_rk=None):
-        '''
-        Generates a shared access signature for the table.
-        Use the returned signature with the sas_token parameter of TableService.
-
-        :param str table_name:
-            The name of the table to create a SAS token for.
-        :param TablePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: date or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: date or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_table_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with this SAS. The default value
-            is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
-        :param str start_pk:
-            The minimum partition key accessible with this shared access 
-            signature. startpk must accompany startrk. Key values are inclusive. 
-            If omitted, there is no lower bound on the table entities that can 
-            be accessed.
-        :param str start_rk:
-            The minimum row key accessible with this shared access signature. 
-            startpk must accompany startrk. Key values are inclusive. If 
-            omitted, there is no lower bound on the table entities that can be 
-            accessed.
-        :param str end_pk:
-            The maximum partition key accessible with this shared access 
-            signature. endpk must accompany endrk. Key values are inclusive. If 
-            omitted, there is no upper bound on the table entities that can be 
-            accessed.
-        :param str end_rk:
-            The maximum row key accessible with this shared access signature. 
-            endpk must accompany endrk. Key values are inclusive. If omitted, 
-            there is no upper bound on the table entities that can be accessed.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_table(
-            table_name,
-            permission=permission, 
-            expiry=expiry,
-            start=start, 
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            start_pk=start_pk,
-            start_rk=start_rk,
-            end_pk=end_pk,
-            end_rk=end_rk,
-        )
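A usage sketch, assuming ts is a TableService constructed with an account key
(names and values hypothetical):

    from datetime import datetime, timedelta

    token = ts.generate_table_shared_access_signature(
        'mytable',
        permission=TablePermissions.QUERY,
        expiry=datetime.utcnow() + timedelta(hours=1))
    # The token can then back a key-less client restricted to queries:
    sas_service = TableService(account_name='myaccount', sas_token=token)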
-
-    def get_table_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Table service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The table service properties.
-        :rtype: :class:`~azure.storage.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = '/'
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_service_properties(response.body)
-
-    def set_table_service_properties(self, logging=None, hour_metrics=None, 
-                                    minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Table service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list of :class:`~azure.storage.models.CorsRule`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = '/'
-        request.query = [
-            ('restype', 'service'),
-            ('comp', 'properties'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
-
-    def list_tables(self, num_results=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the tables. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all tables 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        tables, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param int num_results:
-            The maximum number of tables to return.
-        :param marker:
-            An opaque continuation object. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :type marker: obj
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        :return: A generator which produces :class:`~azure.storage.table.models.Table` objects.
-        :rtype: :class:`~azure.storage.models.ListGenerator`
-        '''
-        kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout}
-        resp = self._list_tables(**kwargs)
-
-        return ListGenerator(resp, self._list_tables, (), kwargs)
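A usage sketch (ts is a hypothetical TableService instance):

    # The generator lazily follows continuation tokens:
    for table in ts.list_tables():
        print(table.name)

    # With num_results, next_marker allows resuming after the generator
    # finishes enumerating its page:
    first_page = ts.list_tables(num_results=5)
    names = [t.name for t in first_page]
    next_page = ts.list_tables(num_results=5, marker=first_page.next_marker)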
-
-    def _list_tables(self, max_results=None, marker=None, timeout=None):
-        '''
-        Returns a list of tables under the specified account. Makes a single list 
-        request to the service. Used internally by the list_tables method.
-
-        :param int max_results:
-            The maximum number of tables to return. A single list request may 
-            return up to 1000 tables and potentially a continuation token which 
-            should be followed to get additional results.
-        :param marker:
-            A dictionary which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            tables. The marker value is opaque to the client.
-        :type marker: obj
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of tables, potentially with a next_marker property.
-        :rtype: list of :class:`~azure.storage.table.models.Table`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = '/Tables'
-        request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
-        request.query = [
-            ('$top', _int_to_str(max_results)),
-            ('NextTableName', _to_str(marker)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_json_response_to_tables(response)
-
-    def create_table(self, table_name, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new table in the storage account.
-
-        :param str table_name:
-            The name of the table to create. The table name may contain only
-            alphanumeric characters and cannot begin with a numeric character.
-            It is case-insensitive and must be from 3 to 63 characters long.
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the table already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the table was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('table', table_name)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host = self._get_host()
-        request.path = '/Tables'
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
-                           _DEFAULT_PREFER_HEADER,
-                           _DEFAULT_ACCEPT_HEADER]
-        request.body = _get_request_body(_convert_table_to_json(table_name))
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def exists(self, table_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the table exists.
-
-        :param str table_name:
-            The name of table to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the table exists.
-        :rtype: bool
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = '/Tables' + "('" + table_name + "')"
-        request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
-        request.query = [('timeout', _int_to_str(timeout))]
-
-        try:
-            self._perform_request(request)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def delete_table(self, table_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified table and any data it contains.
-
-        When a table is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The table is later removed from 
-        the Table service during garbage collection.
-
-        Note that deleting a table is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the table while it is being deleted, 
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str table_name:
-            The name of the table to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the table doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the table was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host = self._get_host()
-        request.path = '/Tables(\'' + _to_str(table_name) + '\')'
-        request.query = [('timeout', _int_to_str(timeout))]
-        request.headers = [_DEFAULT_ACCEPT_HEADER]
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
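A sketch tying the create/exists/delete calls together (ts is a hypothetical
TableService instance):

    created = ts.create_table('mytable')   # False if the table already existed
    assert ts.exists('mytable')
    ts.delete_table('mytable')             # marked for deletion; removal is deferred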
-
-    def get_table_acl(self, table_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        table that may be used with Shared Access Signatures.
-
-        :param str table_name:
-            The name of an existing table.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the table.
-        :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = '/' + _to_str(table_name)
-        request.query = [
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_xml_to_signed_identifiers(response.body)
-
-    def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the table that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a table, the existing permissions are replaced. 
-        To update the table’s permissions, call :func:`~get_table_acl` to fetch 
-        all access policies associated with the table, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a table, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str table_name:
-            The name of an existing table.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the table. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('table_name', table_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host = self._get_host()
-        request.path = '/' + _to_str(table_name)
-        request.query = [
-            ('comp', 'acl'),
-            ('timeout', _int_to_str(timeout)),
-        ]
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
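A sketch of establishing a stored access policy; AccessPolicy comes from the
shared storage models imported alongside this module (values hypothetical):

    from datetime import datetime, timedelta

    policy = AccessPolicy(permission=TablePermissions(query=True),
                          expiry=datetime.utcnow() + timedelta(days=7))
    ts.set_table_acl('mytable', signed_identifiers={'read-only': policy})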
-
-    def query_entities(self, table_name, filter=None, select=None, num_results=None,
-                       marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                       property_resolver=None, timeout=None):
-        '''
-        Returns a generator to list the entities in the table specified. The 
-        generator will lazily follow the continuation tokens returned by the 
-        service and stop when all entities have been returned or num_results is 
-        reached.
-
-        If num_results is specified and the account has more than that number of 
-        entities, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str table_name:
-            The name of the table to query.
-        :param str filter:
-            Returns only entities that satisfy the specified filter. Note that 
-            no more than 15 discrete comparisons are permitted within a $filter 
-            string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx 
-            for more information on constructing filters.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param int num_results:
-            The maximum number of entities to return.
-        :param marker:
-            An opaque continuation object. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :type marker: obj
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
-        :rtype: :class:`~azure.storage.models.ListGenerator`
-        '''
-        args = (table_name,)
-        kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker, 
-                  'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout}
-        resp = self._query_entities(*args, **kwargs)
-
-        return ListGenerator(resp, self._query_entities, args, kwargs)
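A usage sketch of a filtered, projected query (filter syntax per the MSDN link
above; ts and the values are hypothetical):

    entities = ts.query_entities(
        'mytable',
        filter="PartitionKey eq 'pk' and Age gt 21",
        select='Age,IsActive',
        num_results=100)
    for entity in entities:
        print(entity.Age, entity.IsActive)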
-
-    def _query_entities(self, table_name, filter=None, select=None, max_results=None,
-                       marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                       property_resolver=None, timeout=None):
-        '''
-        Returns a list of entities under the specified table. Makes a single list 
-        request to the service. Used internally by the query_entities method.
-
-        :param str table_name:
-            The name of the table to query.
-        :param str filter:
-            Returns only entities that satisfy the specified filter. Note that 
-            no more than 15 discrete comparisons are permitted within a $filter 
-            string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx 
-            for more information on constructing filters.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param int max_results:
-            The maximum number of entities to return.
-        :param marker:
-            A dictionary which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            entities. The marker value is opaque to the client.
-        :type marker: obj
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of entities, potentially with a next_marker property.
-        :rtype: list of :class:`~azure.storage.table.models.Entity`
-        '''
-        _validate_not_none('table_name', table_name)
-        _validate_not_none('accept', accept)
-        next_partition_key = None if marker is None else marker.get('nextpartitionkey')
-        next_row_key = None if marker is None else marker.get('nextrowkey')
-
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host = self._get_host()
-        request.path = '/' + _to_str(table_name) + '()'
-        request.headers = [('Accept', _to_str(accept))]
-        request.query = [
-            ('$filter', _to_str(filter)),
-            ('$select', _to_str(select)),
-            ('$top', _int_to_str(max_results)),
-            ('NextPartitionKey', _to_str(next_partition_key)),
-            ('NextRowKey', _to_str(next_row_key)),
-            ('timeout', _int_to_str(timeout)),
-        ]
-
-        response = self._perform_request(request)
-        return _convert_json_response_to_entities(response, property_resolver)
-
-    def commit_batch(self, table_name, batch, timeout=None):
-        '''
-        Commits a :class:`~azure.storage.table.TableBatch` request.
-
-        :param str table_name:
-            The name of the table to commit the batch to.
-        :param TableBatch batch:
-            The batch to commit.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of the batch responses corresponding to the requests in the batch.
-        :rtype: list of response objects
-        '''
-        _validate_not_none('table_name', table_name)
-
-        # Construct the batch request
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host = self._get_host()
-        request.path = '/' + '$batch'
-        request.query = [('timeout', _int_to_str(timeout))]
-
-        # Update the batch operation requests with table and client specific info
-        for row_key, batch_request in batch._requests:
-            batch_request.host = self._get_host()
-            if batch_request.method == 'POST':
-                batch_request.path = '/' + _to_str(table_name)
-            else:
-                batch_request.path = _get_entity_path(table_name, batch._partition_key, row_key)
-            _update_request(batch_request)
-
-        # Construct the batch body
-        request.body, boundary = _convert_batch_to_json(batch._requests)
-        request.headers = [('Content-Type', boundary)]
-
-        # Perform the batch request and return the response
-        response = self._perform_request(request)
-        responses = _parse_batch_response(response.body)
-        return responses
-
-    @contextmanager
-    def batch(self, table_name, timeout=None):
-        '''
-        Creates a batch object which can be used as a context manager. Commits the batch on exit.
-
-        :param str table_name:
-            The name of the table to commit the batch to.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        batch = TableBatch()
-        yield batch
-        self.commit_batch(table_name, batch, timeout=timeout)
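A usage sketch of the context manager form (ts and the entities are
hypothetical):

    ts = TableService(account_name='myaccount', account_key='mykey')
    with ts.batch('mytable') as batch:
        batch.insert_entity({'PartitionKey': 'pk', 'RowKey': '0000001', 'Age': 23})
        batch.insert_entity({'PartitionKey': 'pk', 'RowKey': '0000002', 'Age': 31})
    # commit_batch runs automatically when the with-block exits.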
-
-    def get_entity(self, table_name, partition_key, row_key, select=None,
-                   accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
-                   property_resolver=None, timeout=None):
-        '''
-        Get an entity from the specified table. Throws if the entity does not exist.
-
-        :param str table_name:
-            The name of the table to get the entity from.
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str select:
-            Returns only the desired properties of an entity from the set.
-        :param str accept:
-            Specifies the accepted content type of the response payload. See 
-            :class:`~azure.storage.table.models.TablePayloadFormat` for possible 
-            values.
-        :param property_resolver:
-            A function which given the partition key, row key, property name, 
-            property value, and the property EdmType if returned by the service, 
-            returns the EdmType of the property. Generally used if accept is set 
-            to JSON_NO_METADATA.
-        :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The retrieved entity.
-        :rtype: :class:`~azure.storage.table.models.Entity`
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _get_entity(partition_key, row_key, select, accept)
-        request.host = self._get_host()
-        request.path = _get_entity_path(table_name, partition_key, row_key)
-        request.query += [('timeout', _int_to_str(timeout))]
-
-        response = self._perform_request(request)
-        return _convert_json_response_to_entity(response, property_resolver)
-
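A sketch of a property_resolver, which matters mainly with JSON_NO_METADATA where the service omits Edm type information; import paths are assumed and 'count' is a hypothetical property name:

    from azure.multiapi.cosmosdb.v2017_04_17.table import TableService          # assumed path
    from azure.multiapi.cosmosdb.v2017_04_17.table.models import (               # assumed path
        EdmType,
        TablePayloadFormat,
    )

    def resolver(pk, rk, prop_name, prop_value, service_edm_type):
        # Reattach the type for a known property; defer to the service otherwise.
        if prop_name == 'count':
            return EdmType.INT64
        return service_edm_type

    ts = TableService(account_name='myaccount', account_key='<base64-key>')
    entity = ts.get_entity('tasks', 'p1', '001',
                           accept=TablePayloadFormat.JSON_NO_METADATA,
                           property_resolver=resolver)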
-    def insert_entity(self, table_name, entity, timeout=None):
-        '''
-        Inserts a new entity into the table. Throws if an entity with the same 
-        PartitionKey and RowKey already exists.
-
-        When inserting an entity into a table, you must specify values for the 
-        PartitionKey and RowKey system properties. Together, these properties 
-        form the primary key and must be unique within the table. Both the 
-        PartitionKey and RowKey values must be string values; each key value may 
-        be up to 1 KB in size. If you are using an integer value for a key, you 
-        should convert the integer to a fixed-width string, because keys are 
-        sorted lexicographically. For example, you should convert the value 
-        1 to 0000001 to ensure proper sorting.
-
-        :param str table_name:
-            The name of the table to insert the entity into.
-        :param entity:
-            The entity to insert. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the inserted entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _insert_entity(entity)
-        request.host = self._get_host()
-        request.path = '/' + _to_str(table_name)
-        request.query += [('timeout', _int_to_str(timeout))]
-
-        response = self._perform_request(request)
-        return _extract_etag(response)
-
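Following the fixed-width advice above, a small sketch (placeholder table name and credentials, assumed import path):

    from azure.multiapi.cosmosdb.v2017_04_17.table import TableService  # assumed path

    def to_key(n, width=7):
        # 1 -> '0000001', so lexicographic order matches numeric order
        return str(n).zfill(width)

    ts = TableService(account_name='myaccount', account_key='<base64-key>')
    etag = ts.insert_entity('tasks', {'PartitionKey': 'orders',
                                      'RowKey': to_key(1),
                                      'total': 9.99})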
-    def update_entity(self, table_name, entity, if_match='*', timeout=None):
-        '''
-        Updates an existing entity in a table. Throws if the entity does not exist. 
-        The update_entity operation replaces the entire entity and can be used to 
-        remove properties.
-
-        :param str table_name:
-            The name of the table containing the entity to update.
-        :param entity:
-            The entity to update. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The update operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional update, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _update_entity(entity, if_match)
-        request.host = self._get_host()
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-        request.query += [('timeout', _int_to_str(timeout))]
-
-        response = self._perform_request(request)
-        return _extract_etag(response)
-
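A sketch of the optimistic-concurrency flow described above: read, modify, then update conditioned on the etag from the read. It assumes the track1 import path, that get_entity populates entity.etag (as it does in this SDK generation), and that a lost race surfaces as AzureHttpError from azure.common:

    from azure.common import AzureHttpError
    from azure.multiapi.cosmosdb.v2017_04_17.table import TableService  # assumed path

    ts = TableService(account_name='myaccount', account_key='<base64-key>')
    entity = ts.get_entity('tasks', 'p1', '001')
    entity['value'] = entity['value'] + 1
    try:
        ts.update_entity('tasks', entity, if_match=entity.etag)
    except AzureHttpError:
        # Precondition failed: the entity changed after we read it; re-read and retry.
        pass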
-    def merge_entity(self, table_name, entity, if_match='*', timeout=None):
-        '''
-        Updates an existing entity by merging the entity's properties. Throws 
-        if the entity does not exist. 
-        
-        This operation does not replace the existing entity as the update_entity
-        operation does. A property cannot be removed with merge_entity.
-        
-        Any properties with null values are ignored. All other properties will be 
-        updated or added.
-
-        :param str table_name:
-            The name of the table containing the entity to merge.
-        :param entity:
-            The entity to merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`~azure.storage.table.models.Entity`
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The merge operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional merge, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _merge_entity(entity, if_match)
-        request.host = self._get_host()
-        request.query += [('timeout', _int_to_str(timeout))]
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        response = self._perform_request(request)
-        return _extract_etag(response)
-
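Contrasting the two operations in one sketch (same assumptions as the earlier examples): update replaces the stored entity wholesale, merge only touches the properties you send.

    from azure.multiapi.cosmosdb.v2017_04_17.table import TableService  # assumed path

    ts = TableService(account_name='myaccount', account_key='<base64-key>')
    patch = {'PartitionKey': 'p1', 'RowKey': '001', 'status': 'done'}
    ts.merge_entity('tasks', patch)   # other stored properties are preserved
    ts.update_entity('tasks', patch)  # other stored properties are removed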
-    def delete_entity(self, table_name, partition_key, row_key,
-                      if_match='*', timeout=None):
-        '''
-        Deletes an existing entity in a table. Throws if the entity does not exist.
-
-        When an entity is successfully deleted, the entity is immediately marked 
-        for deletion and is no longer accessible to clients. The entity is later 
-        removed from the Table service during garbage collection.
-
-        :param str table_name:
-            The name of the table containing the entity to delete.
-        :param str partition_key:
-            The PartitionKey of the entity.
-        :param str row_key:
-            The RowKey of the entity.
-        :param str if_match:
-            The client may specify the ETag for the entity on the 
-            request in order to compare to the ETag maintained by the service 
-            for the purpose of optimistic concurrency. The delete operation 
-            will be performed only if the ETag sent by the client matches the 
-            value maintained by the server, indicating that the entity has 
-            not been modified since it was retrieved by the client. To force 
-            an unconditional delete, set If-Match to the wildcard character (*).
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _delete_entity(partition_key, row_key, if_match)
-        request.host = self._get_host()
-        request.query += [('timeout', _int_to_str(timeout))]
-        request.path = _get_entity_path(table_name, partition_key, row_key)
-
-        self._perform_request(request)
-
-    def insert_or_replace_entity(self, table_name, entity, timeout=None):
-        '''
-        Replaces an existing entity or inserts a new entity if it does not
-        exist in the table. Because this operation can insert or update an
-        entity, it is also known as an "upsert" operation.
-
-        If insert_or_replace_entity is used to replace an entity, any properties 
-        from the previous entity will be removed if the new entity does not define 
-        them.
-
-        :param str table_name:
-            The name of the table in which to insert or replace the entity.
-        :param entity:
-            The entity to insert or replace. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _insert_or_replace_entity(entity)
-        request.host = self._get_host()
-        request.query += [('timeout', _int_to_str(timeout))]
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        response = self._perform_request(request)
-        return _extract_etag(response)
-
-    def insert_or_merge_entity(self, table_name, entity, timeout=None):
-        '''
-        Merges an existing entity or inserts a new entity if it does not exist
-        in the table. 
-
-        If insert_or_merge_entity is used to merge an entity, any properties from 
-        the previous entity will be retained if the request does not define or 
-        include them.
-
-        :param str table_name:
-            The name of the table in which to insert or merge the entity.
-        :param entity:
-            The entity to insert or merge. Could be a dict or an entity object. 
-            Must contain a PartitionKey and a RowKey.
-        :type entity: a dict or :class:`~azure.storage.table.models.Entity`
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The etag of the entity.
-        :rtype: str
-        '''
-        _validate_not_none('table_name', table_name)
-        request = _insert_or_merge_entity(entity)
-        request.host = self._get_host()
-        request.query += [('timeout', _int_to_str(timeout))]
-        request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
-
-        response = self._perform_request(request)
-        return _extract_etag(response)
-
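The upsert pair behaves analogously, sketched under the same assumptions:

    from azure.multiapi.cosmosdb.v2017_04_17.table import TableService  # assumed path

    ts = TableService(account_name='myaccount', account_key='<base64-key>')
    ts.insert_or_replace_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '001', 'a': 1})
    # the stored entity now has exactly PartitionKey, RowKey, and 'a'
    ts.insert_or_merge_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '001', 'b': 2})
    # the stored entity now has both 'a' and 'b'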
-    def _perform_request_worker(self, request):
-        _update_storage_table_header(request)
-        return super(TableService, self)._perform_request_worker(request)
\ No newline at end of file
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,40 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from .appendblobservice import AppendBlobService
-from .blockblobservice import BlockBlobService
-from .models import (
-    Container,
-    ContainerProperties,
-    Blob,
-    BlobProperties,
-    BlobBlock,
-    BlobBlockList,
-    PageRange,
-    ContentSettings,
-    CopyProperties,
-    ContainerPermissions,
-    BlobPermissions,
-    _LeaseActions,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    Include,
-    SequenceNumberAction,
-    BlockListType,
-    PublicAccess,
-    BlobPrefix,
-    DeleteSnapshot,
-)
-from .pageblobservice import PageBlobService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,23 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.37.1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-04-17'
-
-# internal configurations, should not be changed
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,436 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from azure.common import AzureException
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _decode_base64_to_text,
-    _to_str,
-    _get_content_md5
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _int_to_str,
-    _parse_metadata,
-    _convert_xml_to_signed_identifiers,
-    _bool,
-)
-from .models import (
-    Container,
-    Blob,
-    BlobBlock,
-    BlobBlockList,
-    BlobBlockState,
-    BlobProperties,
-    PageRange,
-    ContainerProperties,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    BlobPrefix,
-)
-from ._encryption import _decrypt_blob
-from ..common.models import _list
-from ..common._error import (
-    _validate_content_match,
-    _ERROR_DECRYPTION_FAILURE,
-)
-
-
-def _parse_base_properties(response):
-    '''
-    Extracts basic response headers.
-    '''
-    resource_properties = ResourceProperties()
-    resource_properties.last_modified = parser.parse(response.headers.get('last-modified'))
-    resource_properties.etag = response.headers.get('etag')
-
-    return resource_properties
-
-
-def _parse_page_properties(response):
-    '''
-    Extracts page response headers.
-    '''
-    put_page = PageBlobProperties()
-    put_page.last_modified = parser.parse(response.headers.get('last-modified'))
-    put_page.etag = response.headers.get('etag')
-    put_page.sequence_number = _int_to_str(response.headers.get('x-ms-blob-sequence-number'))
-
-    return put_page
-
-
-def _parse_append_block(response):
-    '''
-    Extracts append block response headers.
-    '''
-    append_block = AppendBlockProperties()
-    append_block.last_modified = parser.parse(response.headers.get('last-modified'))
-    append_block.etag = response.headers.get('etag')
-    append_block.append_offset = _int_to_str(response.headers.get('x-ms-blob-append-offset'))
-    append_block.committed_block_count = _int_to_str(response.headers.get('x-ms-blob-committed-block-count'))
-
-    return append_block
-
-
-def _parse_snapshot_blob(response, name):
-    '''
-    Extracts snapshot return header.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_blob(response, name, snapshot)
-
-
-def _parse_lease(response):
-    '''
-    Extracts lease time and ID return headers.
-    '''
-    lease = {'time': response.headers.get('x-ms-lease-time')}
-    if lease['time']:
-        lease['time'] = _int_to_str(lease['time'])
-
-    lease['id'] = response.headers.get('x-ms-lease-id')
-
-    return lease
-
-
-def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
-                key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, BlobProperties)
-
-    # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
-    content_settings = props.content_settings
-    if 'content-range' in response.headers:
-        if 'x-ms-blob-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    if key_encryption_key is not None or key_resolver_function is not None:
-        try:
-            response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
-                                          response, start_offset, end_offset)
-        except Exception:
-            raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-    return Blob(name, snapshot, response.body, props, metadata)
-
-
-def _parse_container(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ContainerProperties)
-    return Container(name, props, metadata)
-
-
-def _convert_xml_to_signed_identifiers_and_access(response):
-    acl = _convert_xml_to_signed_identifiers(response)
-    acl.public_access = response.headers.get('x-ms-blob-public-access')
-
-    return acl
-
-
-def _convert_xml_to_containers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Containers>
-        <Container>
-          <Name>container-name</Name>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <LeaseStatus>locked | unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <PublicAccess>blob | container</PublicAccess>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Container>
-      </Containers>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    containers = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
-
-    containers_element = list_element.find('Containers')
-
-    for container_element in containers_element.findall('Container'):
-        # Name element
-        container = Container()
-        container.name = container_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = container_element.find('Metadata')
-        if metadata_root_element is not None:
-            container.metadata = dict()
-            for metadata_element in metadata_root_element:
-                container.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = container_element.find('Properties')
-        container.properties.etag = properties_element.findtext('Etag')
-        container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        container.properties.lease_status = properties_element.findtext('LeaseStatus')
-        container.properties.lease_state = properties_element.findtext('LeaseState')
-        container.properties.lease_duration = properties_element.findtext('LeaseDuration')
-        container.properties.public_access = properties_element.findtext('PublicAccess')
-
-        # Add container to list
-        containers.append(container)
-
-    return containers
-
-
-LIST_BLOBS_ATTRIBUTE_MAP = {
-    'Last-Modified': (None, 'last_modified', parser.parse),
-    'Etag': (None, 'etag', _to_str),
-    'x-ms-blob-sequence-number': (None, 'sequence_number', _int_to_str),
-    'BlobType': (None, 'blob_type', _to_str),
-    'Content-Length': (None, 'content_length', _int_to_str),
-    'ServerEncrypted': (None, 'server_encrypted', _bool),
-    'Content-Type': ('content_settings', 'content_type', _to_str),
-    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
-    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
-    'Content-Language': ('content_settings', 'content_language', _to_str),
-    'Content-MD5': ('content_settings', 'content_md5', _to_str),
-    'Cache-Control': ('content_settings', 'cache_control', _to_str),
-    'LeaseStatus': ('lease', 'status', _to_str),
-    'LeaseState': ('lease', 'state', _to_str),
-    'LeaseDuration': ('lease', 'duration', _to_str),
-    'CopyId': ('copy', 'id', _to_str),
-    'CopySource': ('copy', 'source', _to_str),
-    'CopyStatus': ('copy', 'status', _to_str),
-    'CopyProgress': ('copy', 'progress', _to_str),
-    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
-    'CopyStatusDescription': ('copy', 'status_description', _to_str),
-    'AccessTier': (None, 'blob_tier', _to_str),
-    'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
-    'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
-    'ArchiveStatus': (None, 'rehydration_status', _to_str),
-}
-
-
-def _convert_xml_to_blob_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Delimiter>string-value</Delimiter>
-      <Blobs>
-        <Blob>
-          <Name>blob-name</Name>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date-time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Content-Length>size-in-bytes</Content-Length>
-            <Content-Type>blob-content-type</Content-Type>
-            <Content-Encoding />
-            <Content-Language />
-            <Content-MD5 />
-            <Cache-Control />
-            <x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
-            <BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
-            <LeaseStatus>locked|unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <CopyId>id</CopyId>
-            <CopyStatus>pending | success | aborted | failed </CopyStatus>
-            <CopySource>source url</CopySource>
-            <CopyProgress>bytes copied/bytes total</CopyProgress>
-            <CopyCompletionTime>datetime</CopyCompletionTime>
-            <CopyStatusDescription>error string</CopyStatusDescription>
-            <AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
-            <AccessTierChangeTime>date-time-value</AccessTierChangeTime>
-            <AccessTierInferred>true</AccessTierInferred>
-          </Properties>
-          <Metadata>   
-            <Name>value</Name>
-          </Metadata>
-        </Blob>
-        <BlobPrefix>
-          <Name>blob-prefix</Name>
-        </BlobPrefix>
-      </Blobs>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    blob_list = _list()
-    list_element = ETree.fromstring(response.body)
-
-    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
-
-    blobs_element = list_element.find('Blobs')
-    blob_prefix_elements = blobs_element.findall('BlobPrefix')
-    if blob_prefix_elements is not None:
-        for blob_prefix_element in blob_prefix_elements:
-            prefix = BlobPrefix()
-            prefix.name = blob_prefix_element.findtext('Name')
-            blob_list.append(prefix)
-
-    for blob_element in blobs_element.findall('Blob'):
-        blob = Blob()
-        blob.name = blob_element.findtext('Name')
-        blob.snapshot = blob_element.findtext('Snapshot')
-
-        # Properties
-        properties_element = blob_element.find('Properties')
-        if properties_element is not None:
-            for property_element in properties_element:
-                info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
-                if info is None:
-                    setattr(blob.properties, property_element.tag, _to_str(property_element.text))
-                elif info[0] is None:
-                    setattr(blob.properties, info[1], info[2](property_element.text))
-                else:
-                    attr = getattr(blob.properties, info[0])
-                    setattr(attr, info[1], info[2](property_element.text))
-
-        # Metadata
-        metadata_root_element = blob_element.find('Metadata')
-        if metadata_root_element is not None:
-            blob.metadata = dict()
-            for metadata_element in metadata_root_element:
-                blob.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add blob to list
-        blob_list.append(blob)
-
-    return blob_list
-
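The three-field tuples in LIST_BLOBS_ATTRIBUTE_MAP drive a small dispatch in the loop above: (None, name, parse) sets an attribute directly on the properties object, while ('nested', name, parse) sets it on a nested settings object. A self-contained sketch of the same pattern, with stand-in classes:

    from dateutil import parser

    ATTRIBUTE_MAP = {
        'Last-Modified': (None, 'last_modified', parser.parse),
        'Content-Type': ('content_settings', 'content_type', str),
    }

    class _Bag(object):
        pass

    props = _Bag()
    props.content_settings = _Bag()
    for tag, text in [('Last-Modified', '2017-04-17T00:00:00Z'),
                      ('Content-Type', 'text/plain')]:
        parent, attr, parse = ATTRIBUTE_MAP[tag]
        target = props if parent is None else getattr(props, parent)
        setattr(target, attr, parse(text))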
-
-def _convert_xml_to_block_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <CommittedBlocks>
-         <Block>
-            <Name>base64-encoded-block-id</Name>
-            <Size>size-in-bytes</Size>
-         </Block>
-      </CommittedBlocks>
-      <UncommittedBlocks>
-        <Block>
-          <Name>base64-encoded-block-id</Name>
-          <Size>size-in-bytes</Size>
-        </Block>
-      </UncommittedBlocks>
-     </BlockList>
-
-    Converts xml response to block list class.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    block_list = BlobBlockList()
-
-    list_element = ETree.fromstring(response.body)
-
-    committed_blocks_element = list_element.find('CommittedBlocks')
-    if committed_blocks_element is not None:
-        for block_element in committed_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
-            block._set_size(block_size)
-            block_list.committed_blocks.append(block)
-
-    uncommitted_blocks_element = list_element.find('UncommittedBlocks')
-    if uncommitted_blocks_element is not None:
-        for block_element in uncommitted_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
-            block._set_size(block_size)
-            block_list.uncommitted_blocks.append(block)
-
-    return block_list
-
-
-def _convert_xml_to_page_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <PageList>
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-       <ClearRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </ClearRange> 
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-    </PageList> 
-    '''
-    if response is None or response.body is None:
-        return None
-
-    page_list = list()
-
-    list_element = ETree.fromstring(response.body)
-
-    for page_range_element in list_element:
-        if page_range_element.tag == 'PageRange':
-            is_cleared = False
-        elif page_range_element.tag == 'ClearRange':
-            is_cleared = True
-        else:
-            continue  # skip any unrecognized page range types instead of appending them
-
-        page_list.append(
-            PageRange(
-                int(page_range_element.findtext('Start')),
-                int(page_range_element.findtext('End')),
-                is_cleared
-            )
-        )
-
-    return page_list
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import threading
-
-from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-
-
-def _download_blob_chunks(blob_service, container_name, blob_name, snapshot,
-                          download_size, block_size, progress, start_range, end_range,
-                          stream, max_connections, progress_callback, validate_content,
-                          lease_id, if_modified_since, if_unmodified_since, if_match,
-                          if_none_match, timeout, operation_context):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('blob'))
-
-    downloader = _BlobChunkDownloader(
-        blob_service,
-        container_name,
-        blob_name,
-        snapshot,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        lease_id,
-        if_modified_since,
-        if_unmodified_since,
-        if_match,
-        if_none_match,
-        timeout,
-        operation_context,
-    )
-
-    import concurrent.futures
-    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
-        # Consume the iterator so every chunk is downloaded (and any worker
-        # exception re-raised) before this function returns.
-        list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-
-class _BlobChunkDownloader(object):
-    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
-                 chunk_size, progress, start_range, end_range, stream,
-                 progress_callback, validate_content, lease_id, if_modified_since,
-                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.snapshot = snapshot
-        self.chunk_size = chunk_size
-
-        self.download_size = download_size
-        self.start_index = start_range
-        self.blob_end = end_range
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-        self.progress_lock = threading.Lock()
-        self.timeout = timeout
-        self.operation_context = operation_context
-
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.if_modified_since = if_modified_since
-        self.if_unmodified_since = if_unmodified_since
-        self.if_match = if_match
-        self.if_none_match = if_none_match
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.blob_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.blob_end:
-            chunk_end = self.blob_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        response = self.blob_service._get_blob(
-            self.container_name,
-            self.blob_name,
-            snapshot=self.snapshot,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            if_modified_since=self.if_modified_since,
-            if_unmodified_since=self.if_unmodified_since,
-            if_match=self.if_match,
-            if_none_match=self.if_none_match,
-            timeout=self.timeout,
-            _context=self.operation_context
-        )
-
-        # This makes sure that if_match is set so that we can validate 
-        # that subsequent downloads are to an unmodified blob
-        self.if_match = response.properties.etag
-        return response
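The offset/clamping arithmetic in get_chunk_offsets and process_chunk reduces to the following self-contained sketch (inclusive end bytes, as the ranged GET above expects):

    def chunk_ranges(start, end, chunk_size):
        # Yields (first_byte, last_byte) pairs covering [start, end).
        index = start
        while index < end:
            yield index, min(index + chunk_size, end) - 1
            index += chunk_size

    assert list(chunk_ranges(0, 10, 4)) == [(0, 3), (4, 7), (8, 9)]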
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,196 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-from json import (
-    dumps,
-    loads,
-)
-from os import urandom
-
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _generate_AES_CBC_cipher,
-    _dict_to_encryption_data,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-    _ERROR_DATA_NOT_ENCRYPTED,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-)
-
-
-def _encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with PKCS7 padding (128-bit blocks).
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the _upload_blob_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
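A minimal sketch of an object satisfying the key-encryption-key interface the docstring above requires. The XOR "wrapping" is a toy stand-in for a real algorithm such as RSA-OAEP or AES key wrap, and the class name is hypothetical:

    class ToyKeyWrapper(object):
        # Illustrative only -- not cryptographically secure.
        def __init__(self, kid, secret):
            self.kid = kid
            self.secret = secret

        def wrap_key(self, key):
            pad = (self.secret * ((len(key) // len(self.secret)) + 1))[:len(key)]
            return bytes(k ^ s for k, s in zip(key, pad))

        def get_key_wrap_algorithm(self):
            return 'XOR-TOY'

        def get_kid(self):
            return self.kid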
-
-def _generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-    
-    :param object key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the 
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                  response, start_offset, end_offset):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-    
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function key_resolver:
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key 
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    _validate_not_none('response', response)
-    content = response.body
-    _validate_not_none('content', content)
-
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata']))
-    except Exception:
-        if require_encryption:
-            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
-        else:
-            return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    blob_type = response.headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    start_range, end_range = 0, len(content)
-    if 'content-range' in response.headers:
-        content_range = response.headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        start_range = int(content_range[0])
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
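The header slicing above implements this parse of 'Content-Range: bytes x-y/size'; as a standalone sketch:

    def parse_content_range(value):
        # 'bytes 16-31/512' -> (16, 31, 512)
        _, _, rest = value.partition(' ')
        span, _, size = rest.partition('/')
        start, _, end = span.partition('-')
        return int(start), int(end), int(size)

    assert parse_content_range('bytes 16-31/512') == (16, 31, 512)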
-
-def _get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_error.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,38 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
-    'Invalid page blob size: {0}. ' + \
-    'The size must be aligned to a 512-byte boundary.'
-
-_ERROR_PAGE_BLOB_START_ALIGNMENT = \
-    'start_range must align with 512 page size'
-
-_ERROR_PAGE_BLOB_END_ALIGNMENT = \
-    'end_range must align with 512 page size'
-
-_ERROR_INVALID_BLOCK_ID = \
-    'All blocks in block list need to have valid block ids.'
-
-_ERROR_INVALID_LEASE_DURATION = \
-    "lease_duration param needs to be between 15 and 60 or -1."
-
-_ERROR_INVALID_LEASE_BREAK_PERIOD = \
-    "lease_break_period param needs to be between 0 and 60."
-
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use blob chunk downloader more than 1 thread must be ' + \
-    'used since get_blob_to_bytes should be called for single threaded ' + \
-    'blob downloads.'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,127 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from xml.sax.saxutils import escape as xml_escape
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _encode_base64,
-    _str,
-)
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-from ._error import (
-    _ERROR_PAGE_BLOB_START_ALIGNMENT,
-    _ERROR_PAGE_BLOB_END_ALIGNMENT,
-    _ERROR_INVALID_BLOCK_ID,
-)
-from io import BytesIO
-
-
-def _get_path(container_name=None, blob_name=None):
-    '''
-    Creates the path to access a blob resource.
-
-    container_name:
-        Name of container.
-    blob_name:
-        The path to the blob.
-    '''
-    if container_name and blob_name:
-        return '/{0}/{1}'.format(
-            _str(container_name),
-            _str(blob_name))
-    elif container_name:
-        return '/{0}'.format(_str(container_name))
-    else:
-        return '/'
-
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers['x-ms-range'] = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
-
-
-def _convert_block_list_to_xml(block_id_list):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <Committed>first-base64-encoded-block-id</Committed>
-      <Uncommitted>second-base64-encoded-block-id</Uncommitted>
-      <Latest>third-base64-encoded-block-id</Latest>
-    </BlockList>
-
-    Convert a block list to xml to send.
-
-    block_id_list:
-        A list of BlobBlock containing the block ids and block states that are used in put_block_list.
-        Only the id and state of each block are serialized; block content is never sent here.
-    '''
-    if block_id_list is None:
-        return ''
-
-    block_list_element = ETree.Element('BlockList')
-
-    # Serialize each block id under an element named for its state
-    # (Committed, Uncommitted, or Latest)
-    for block in block_id_list:
-        if block.id is None:
-            raise ValueError(_ERROR_INVALID_BLOCK_ID)
-        block_id = xml_escape(_str(_encode_base64(block.id)))
-        ETree.SubElement(block_list_element, block.state).text = block_id
-
-    # Add the XML declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    # return xml value
-    return output
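A usage sketch for the serializer above, with the models import path assumed (BlobBlock defaults to the Latest state in this SDK generation):

    from azure.multiapi.storage.v2017_04_17.blob.models import BlobBlock  # assumed path

    blocks = [BlobBlock(id='block-000'), BlobBlock(id='block-001')]
    payload = _convert_block_list_to_xml(blocks)
    # payload is UTF-8 XML: <BlockList><Latest>...base64 id...</Latest>...</BlockList>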
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,484 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from math import ceil
-from threading import Lock
-
-from ..common._common_conversion import _encode_base64
-from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-from ..common._serialization import (
-    url_quote,
-    _get_data_bytes_only,
-    _len_plus
-)
-from ._encryption import (
-    _get_blob_encryptor_and_padder,
-)
-from .models import BlobBlock
-from ._constants import (
-    _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-)
-
-
-def _upload_blob_chunks(blob_service, container_name, blob_name,
-                        blob_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, lease_id, uploader_class,
-                        maxsize_condition=None, if_match=None, timeout=None,
-                        content_encryption_key=None, initialization_vector=None, resource_properties=None):
-    encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
-                                                       uploader_class is not _PageBlobChunkUploader)
-
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        encryptor,
-        padder
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism, as one ranged upload may start
-    # before the previous one finishes and returns a new etag
-    uploader.if_match = if_match if max_connections <= 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        from threading import BoundedSemaphore
-
-        '''
-        Bound the chunking so we only buffer and submit 'max_connections' work items to the executor at a time.
-        This is necessary because the executor queue would otherwise keep accepting submitted work items,
-        buffering every block in memory at once. The bound of max_connections + 1 ensures the next chunk is
-        already buffered and ready as soon as a worker thread becomes available.
-        '''
-        chunk_throttler = BoundedSemaphore(max_connections + 1)
-
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        futures = []
-        running_futures = []
-
-        # Check for exceptions and fail fast.
-        for chunk in uploader.get_chunk_streams():
-            for f in running_futures:
-                if f.done():
-                    if f.exception():
-                        raise f.exception()
-                    else:
-                        running_futures.remove(f)
-
-            chunk_throttler.acquire()
-            future = executor.submit(uploader.process_chunk, chunk)
-
-            # Release the throttle when the chunk completes; the callback fires
-            # even if it is added after the future has already finished.
-            future.add_done_callback(lambda x: chunk_throttler.release())
-            futures.append(future)
-            running_futures.append(future)
-
-        # result() will wait until completion and also raise any exceptions that may have been set.
-        range_ids = [f.result() for f in futures]
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-
-    if resource_properties:
-        resource_properties.last_modified = uploader.last_modified
-        resource_properties.etag = uploader.etag
-
-    return range_ids
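
The throttle used above is worth seeing in isolation. Below is a minimal, self-contained sketch of the same pattern, assuming nothing from the SDK: a BoundedSemaphore of size max_connections + 1 caps how many work items are buffered and in flight, and each future releases its slot on completion. throttled_map and its toy worker are illustrative stand-ins, not SDK names.

    import concurrent.futures
    from threading import BoundedSemaphore

    def throttled_map(worker, items, max_connections):
        # Cap in-flight submissions at max_connections + 1 so the executor
        # queue never buffers the whole input; the extra slot keeps the next
        # item ready for the first worker thread that frees up.
        throttle = BoundedSemaphore(max_connections + 1)
        futures = []
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            for item in items:
                throttle.acquire()
                future = executor.submit(worker, item)
                # Release the slot as soon as this work item completes.
                future.add_done_callback(lambda _: throttle.release())
                futures.append(future)
            # result() blocks until completion and re-raises any exception.
            return [f.result() for f in futures]

    if __name__ == '__main__':
        print(throttled_map(lambda x: x * x, range(10), max_connections=2))
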
-
-
-def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
-                                  blob_size, block_size, stream, max_connections,
-                                  progress_callback, validate_content, lease_id, uploader_class,
-                                  maxsize_condition=None, if_match=None, timeout=None):
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        None,
-        None
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism, as a ranged upload may start
-    # before the previous one finishes and returns an ETag.
-    uploader.if_match = if_match if max_connections <= 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
-    else:
-        range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
-
-    return range_ids
-
-
-class _BlobChunkUploader(object):
-    def __init__(self, blob_service, container_name, blob_name, blob_size,
-                 chunk_size, stream, parallel, progress_callback,
-                 validate_content, lease_id, timeout, encryptor, padder):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.blob_size = blob_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.timeout = timeout
-        self.encryptor = encryptor
-        self.padder = padder
-        self.last_modified = None
-        self.etag = None
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.blob_size:
-                    read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                temp = _get_data_bytes_only('temp', temp)
-                data += temp
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, BytesIO(data)
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if len(data) > 0:
-                    yield index, BytesIO(data)
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1].read()
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.blob_size)
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.blob_size
-
-        if blob_length is None:
-            blob_length = _len_plus(self.stream)
-            if blob_length is None:
-                raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            yield ('BlockId{:05}'.format(i),
-                   _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
-                              lock))
-
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class _BlockBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
-        self.blob_service._put_block(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            block_id,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            timeout=self.timeout,
-        )
-        return BlobBlock(block_id)
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.blob_service._put_block(
-                self.container_name,
-                self.blob_name,
-                block_stream,
-                block_id,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                timeout=self.timeout,
-            )
-        finally:
-            block_stream.close()
-        return BlobBlock(block_id)
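
The block id scheme in _upload_chunk above (offset, zero-padded to 32 digits, base64-encoded, URL-quoted) can be reproduced with the standard library alone. A sketch assuming Python 3; make_block_id is a hypothetical helper, equivalent in output to url_quote(_encode_base64('{0:032d}'.format(chunk_offset))). Fixed-width ids matter because the service expects every block id within one blob to have the same length.

    import base64
    from urllib.parse import quote

    def make_block_id(chunk_offset):
        # Zero-pad the offset to a fixed 32-digit width, then base64- and
        # URL-encode it so all block ids share one encoded length.
        padded = '{0:032d}'.format(chunk_offset).encode('utf-8')
        return quote(base64.b64encode(padded))

    if __name__ == '__main__':
        print(make_block_id(0))                  # first block
        print(make_block_id(4 * 1024 * 1024))    # block at the 4MB offset
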
-
-
-class _PageBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        resp = self.blob_service._update_page(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            if_match=self.if_match,
-            timeout=self.timeout,
-        )
-
-        if not self.parallel:
-            self.if_match = resp.etag
-
-        self.set_response_properties(resp)
-
-
-class _AppendBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if not hasattr(self, 'current_length'):
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                timeout=self.timeout,
-            )
-
-            self.current_length = resp.append_offset
-        else:
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                appendpos_condition=self.current_length + chunk_offset,
-                timeout=self.timeout,
-            )
-
-        self.set_response_properties(resp)
-
-
-class _SubStream(IOBase):
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derivations of io.IOBase and thus do not implement seekable().
-        # Python 3.x: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # Only the main thread runs this, so there is no need to grab the lock.
-            wrapped_stream.seek(0, SEEK_CUR)
-        except Exception:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # We must avoid buffering more than necessary without using too much memory,
-        # so the max read buffer size is capped at 4MB.
-        self._max_buffer_size = min(length, _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE)
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, n):
-        if self.closed:
-            raise ValueError("Stream is closed.")
-
-        # adjust if out of bounds
-        if n + self._position >= self._length:
-            n = self._length - self._position
-
-        # return fast
-        if n == 0 or self._buffer.closed:
-            return b''
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(n)
-        bytes_read = len(read_buffer)
-        bytes_remaining = n - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_connections > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence == SEEK_SET:
-            start_index = 0
-        elif whence == SEEK_CUR:
-            start_index = self._position
-        elif whence == SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self, b):
-        raise UnsupportedOperation
-
-    def writelines(self, lines):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
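
The heart of _SubStream is the seek-then-read invariant in read(): reposition the shared wrapped stream to this view's absolute offset under the lock, then read, so parallel block uploads never interleave reads. A reduced sketch of that invariant, assuming nothing from the SDK (read_window is a hypothetical helper; _SubStream adds buffering and IOBase plumbing on top of this):

    from io import BytesIO
    from threading import Lock

    def read_window(stream, begin, length, lock):
        # Seek and read atomically so concurrent windows over one stream
        # cannot corrupt each other's reads.
        with lock:
            stream.seek(begin)
            return stream.read(length)

    if __name__ == '__main__':
        shared = BytesIO(b'0123456789' * 4)
        lock = Lock()
        print(read_window(shared, 0, 10, lock))   # first 10-byte window
        print(read_window(shared, 10, 10, lock))  # second, non-overlapping
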
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/appendblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/appendblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/appendblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/appendblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,561 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _parse_append_block,
-    _parse_base_properties,
-)
-from ._serialization import (
-    _get_path,
-)
-from ._upload_chunking import (
-    _AppendBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class AppendBlobService(BaseBlobService):
-    '''
-    An append blob is composed of blocks and is optimized for append operations.
-    When you modify an append blob, blocks are added to the end of the blob only,
-    via the append_block operation. Updating or deleting existing blocks is not
-    supported. Unlike a block blob, an append blob does not expose its block IDs.
-
-    Each block in an append blob can be a different size, up to a maximum of 4 MB,
-    and an append blob can include up to 50,000 blocks. The maximum size of an
-    append blob is therefore slightly more than 195 GB (4 MB x 50,000 blocks).
-
-    :ivar int MAX_BLOCK_SIZE:
-        The size of the blocks put by append_blob_from_* methods. Smaller blocks
-        may be put if less data is provided. The maximum block size the service
-        supports is 4MB.
-    '''
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        self.blob_type = _BlobTypes.AppendBlob
-        super(AppendBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout)
-
-    def create_blob(self, container_name, blob_name, content_settings=None,
-                    metadata=None, lease_id=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a blob or overwrites an existing blob. Use if_none_match='*' to
-        prevent overwriting an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to
-            perform the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def append_block(self, container_name, blob_name, block,
-                     validate_content=False, maxsize_condition=None,
-                     appendpos_condition=None,
-                     lease_id=None, if_modified_since=None,
-                     if_unmodified_since=None, if_match=None,
-                     if_none_match=None, timeout=None):
-        '''
-        Commits a new block of data to the end of an existing append blob.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes block:
-            Content of the block in bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            ETag, last modified, append offset, and committed block count 
-            properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'appendblock',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
-            'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        request.body = _get_data_bytes_only('block', block)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_append_block)
-
-    # ----Convenience APIs----------------------------------------------
-
-    def append_blob_from_path(
-            self, container_name, blob_name, file_path, validate_content=False,
-            maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file path, with automatic
-        chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.append_blob_from_stream(
-                container_name,
-                blob_name,
-                stream,
-                count=count,
-                validate_content=validate_content,
-                maxsize_condition=maxsize_condition,
-                progress_callback=progress_callback,
-                lease_id=lease_id,
-                timeout=timeout)
-
-    def append_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from an array of bytes, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.append_blob_from_stream(
-            container_name,
-            blob_name,
-            stream,
-            count=count,
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout)
-
-    def append_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from str/unicode, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.append_blob_from_bytes(
-            container_name,
-            blob_name,
-            text,
-            index=0,
-            count=len(text),
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout)
-
-    def append_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file/stream, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        # _upload_blob_chunks returns the block ids for block blobs, so resource_properties
-        # is passed in to capture the last_modified and etag for page and append blobs.
-        # Block blobs do not need this, since the subsequent _put_block_list call returns it.
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_BLOCK_SIZE,
-            stream=stream,
-            max_connections=1,  # upload not easily parallelizable
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_AppendBlobChunkUploader,
-            maxsize_condition=maxsize_condition,
-            timeout=timeout,
-            resource_properties=resource_properties
-        )
-
-        return resource_properties
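
For reference, the convenience layer removed in this hunk composed as follows. A hedged usage sketch only: it assumes the v2017_04_17 blob package re-exported AppendBlobService (as the upstream SDK's blob package did), and the account credentials, container, and blob names are placeholders.

    from azure.multiapi.storage.v2017_04_17.blob import AppendBlobService

    # Placeholder credentials; anonymous and SAS auth are also supported.
    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    # An append blob must exist before blocks can be appended to it.
    service.create_blob('mycontainer', 'log.txt')

    # Chunked append with automatic encoding of the text to bytes.
    props = service.append_blob_from_text('mycontainer', 'log.txt', 'first line\n')
    print(props.etag, props.last_modified)
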
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/baseblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/baseblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/baseblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/baseblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,3171 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-from abc import ABCMeta
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-    _StorageNoAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _parse_metadata,
-    _parse_properties,
-    _convert_xml_to_service_stats,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_decryption_required,
-    _validate_access_policies,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    BlobSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_containers,
-    _parse_blob,
-    _convert_xml_to_blob_list,
-    _parse_container,
-    _parse_snapshot_blob,
-    _parse_lease,
-    _convert_xml_to_signed_identifiers_and_access,
-    _parse_base_properties,
-)
-from ._download_chunking import _download_blob_chunks
-from ._error import (
-    _ERROR_INVALID_LEASE_DURATION,
-    _ERROR_INVALID_LEASE_BREAK_PERIOD,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from .models import (
-    BlobProperties,
-    _LeaseActions,
-    ContainerPermissions,
-    BlobPermissions,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class BaseBlobService(StorageClient):
-    '''
-    This is the main class managing Blob resources.
-
-    The Blob service stores text and binary data as blobs in the cloud.
-    The Blob service offers the following three resources: the storage account,
-    containers, and blobs. Within your storage account, containers provide a
-    way to organize sets of blobs. For more information please see:
-    https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_blob_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        blob is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_blob_to_* methods if
-        max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the blob is smaller than
-        this. If this is set larger than 4MB, content_validation will throw an
-        error if enabled. However, if content_validation is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all data successfully uploaded to the service and all data downloaded and
-        successfully read from the service is/was encrypted while on the server. If this flag is set, all required
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    __metaclass__ = ABCMeta
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'blob',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            custom_domain=custom_domain,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(BaseBlobService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            self.authentication = _StorageNoAuthentication()
-
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None):
-        '''
-        Creates the url to access a blob.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :param str snapshot:
-            A string value that uniquely identifies the snapshot. The value of
-            this query parameter indicates the snapshot version.
-        :return: blob access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}/{}'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-            blob_name,
-        )
-
-        if snapshot and sas_token:
-            url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token)
-        elif snapshot:
-            url = '{}?snapshot={}'.format(url, snapshot)
-        elif sas_token:
-            url = '{}?{}'.format(url, sas_token)
-
-        return url
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the blob service.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this SAS. The
-            default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.BLOB, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
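-    # Usage sketch (illustrative only; assumes `service` as above and the
-    # ResourceTypes/AccountPermissions models from azure.storage.common.models,
-    # which support combination with `+`):
-    #
-    #   from datetime import datetime, timedelta
-    #   token = service.generate_account_shared_access_signature(
-    #       ResourceTypes.CONTAINER + ResourceTypes.OBJECT,
-    #       AccountPermissions.READ + AccountPermissions.LIST,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))
-    #   # pass `token` as the sas_token parameter of another BlobService client
-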
-    def generate_container_shared_access_signature(self, container_name,
-                                                   permission=None, expiry=None,
-                                                   start=None, id=None, ip=None, protocol=None,
-                                                   cache_control=None, content_disposition=None,
-                                                   content_encoding=None, content_language=None,
-                                                   content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this SAS. The
-            default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_container(
-            container_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
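-    # Usage sketch (illustrative only; ContainerPermissions is the track1
-    # model class from this package's blob models module):
-    #
-    #   from datetime import datetime, timedelta
-    #   token = service.generate_container_shared_access_signature(
-    #       'mycontainer',
-    #       permission=ContainerPermissions.READ + ContainerPermissions.LIST,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))
-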
-    def generate_blob_shared_access_signature(
-            self, container_name, blob_name, permission=None,
-            expiry=None, start=None, id=None, ip=None, protocol=None,
-            cache_control=None, content_disposition=None,
-            content_encoding=None, content_language=None,
-            content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_container_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this SAS. The
-            default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_blob(
-            container_name,
-            blob_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
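-    # Usage sketch (illustrative only; BlobPermissions is the track1 model
-    # class). The token pairs naturally with make_blob_url above:
-    #
-    #   from datetime import datetime, timedelta
-    #   token = service.generate_blob_shared_access_signature(
-    #       'mycontainer', 'myblob.txt',
-    #       permission=BlobPermissions.READ,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))
-    #   url = service.make_blob_url('mycontainer', 'myblob.txt', sas_token=token)
-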
-    def list_containers(self, prefix=None, num_results=None, include_metadata=False,
-                        marker=None, timeout=None):
-        '''
-        Returns a generator to list the containers under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        containers, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_containers(**kwargs)
-
-        return ListGenerator(resp, self._list_containers, (), kwargs)
-
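-    # Usage sketch (illustrative only): the generator follows continuation
-    # tokens lazily; with num_results set, next_marker enables manual paging:
-    #
-    #   names = [c.name for c in service.list_containers(prefix='log')]
-    #
-    #   page = service.list_containers(num_results=100)
-    #   first_100 = [c.name for c in page]
-    #   if page.next_marker:
-    #       page = service.list_containers(num_results=100,
-    #                                      marker=page.next_marker)
-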
-    def _list_containers(self, prefix=None, marker=None, max_results=None,
-                         include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the containers under the specified account.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the container's
-            metadata be returned as part of the response body. Set this
-            parameter to the string 'metadata' to get the container's metadata.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_containers, operation_context=_context)
-
-    def create_container(self, container_name, metadata=None,
-                         public_access=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails if
-        fail_on_exist is True.
-
-        :param str container_name:
-            Name of container to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the container exists.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is created, False if container already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
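-    # Usage sketch (illustrative only): with the default fail_on_exist=False,
-    # the return value signals whether the container was newly created:
-    #
-    #   created = service.create_container('mycontainer',
-    #                                      metadata={'Category': 'test'})
-    #   # created is False if the container already existed; with
-    #   # fail_on_exist=True an AzureHttpError is raised instead.
-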
-    def get_container_properties(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: properties for the specified container within a container object.
-        :rtype: :class:`~azure.storage.blob.models.Container`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_container, [container_name])
-
-    def get_container_metadata(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the container metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_container_metadata(self, container_name, metadata=None,
-                               lease_id=None, if_modified_since=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param str container_name:
-            Name of existing container.
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as 
-            metadata. Example: {'category': 'test'}
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
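-    # Usage sketch (illustrative only): each set call replaces all existing
-    # container metadata, so a round trip looks like:
-    #
-    #   service.set_container_metadata('mycontainer', {'category': 'test'})
-    #   meta = service.get_container_metadata('mycontainer')
-    #   # meta == {'category': 'test'}
-    #   service.set_container_metadata('mycontainer')  # clears all metadata
-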
-    def get_container_acl(self, container_name, lease_id=None, timeout=None):
-        '''
-        Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param lease_id:
-            If specified, get_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the container, as a
-            dict of str to :class:`~azure.storage.common.models.AccessPolicy`, plus a
-            public_access property if public access is turned on.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access)
-
-    def set_container_acl(self, container_name, signed_identifiers=None,
-                          public_access=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Sets the permissions for the specified container or stored access 
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the container. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param str lease_id:
-            If specified, set_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified date/time.
-        :param datetime if_unmodified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        return self._perform_request(request, _parse_base_properties)
-
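-    # Usage sketch (illustrative only; AccessPolicy and ContainerPermissions
-    # are the track1 model classes). A stored access policy created here can
-    # back a container SAS referenced by id:
-    #
-    #   from datetime import datetime, timedelta
-    #   policy = AccessPolicy(permission=ContainerPermissions.READ,
-    #                         expiry=datetime.utcnow() + timedelta(days=7))
-    #   service.set_container_acl('mycontainer', {'read-only': policy})
-    #   token = service.generate_container_shared_access_signature(
-    #       'mycontainer', id='read-only')
-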
-    def delete_container(self, container_name, fail_not_exist=False,
-                         lease_id=None, if_modified_since=None,
-                         if_unmodified_since=None, timeout=None):
-        '''
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :param str container_name:
-            Name of container to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the container doesn't
-            exist.
-        :param str lease_id:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if the container is deleted, False if the container doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def _lease_container_impl(
-            self, container_name, lease_action, lease_id, lease_duration,
-            lease_break_period, proposed_lease_id, if_modified_since,
-            if_unmodified_since, timeout):
-        '''
-        Establishes and manages a lease on a container.
-        The Lease Container operation can be called in one of five modes
-            Acquire, to request a new lease
-            Renew, to renew an existing lease
-            Change, to change the ID of an existing lease
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the container
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_action:
-            Possible _LeaseActions values: acquire|renew|release|break|change
-        :param str lease_id:
-            Required if the container has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. For backwards compatibility, the default is
-            60, and the value is only used on an acquire operation.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration, in
-            seconds, that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for Acquire, required for Change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_container_lease(
-            self, container_name, lease_duration=-1, proposed_lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Requests a new lease. If the container does not have an active lease,
-        the Blob service creates a lease on the container and returns a new
-        lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Acquire,
-                                           None,  # lease_id
-                                           lease_duration,
-                                           None,  # lease_break_period
-                                           proposed_lease_id,
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
-
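-    # Usage sketch (illustrative only): acquire a short lease, pass the
-    # returned ID to lease-guarded calls, and release when done:
-    #
-    #   lease_id = service.acquire_container_lease('mycontainer',
-    #                                              lease_duration=15)
-    #   try:
-    #       service.set_container_metadata('mycontainer', {'k': 'v'},
-    #                                      lease_id=lease_id)
-    #   finally:
-    #       service.release_container_lease('mycontainer', lease_id)
-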
-    def renew_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified
-        matches that associated with the container. Note that
-        the lease may be renewed even if it has expired as long as the container
-        has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Renew,
-                                           lease_id,
-                                           None,  # lease_duration
-                                           None,  # lease_break_period
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
-
-    def release_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Release the lease. The lease may be released if the lease_id specified matches
-        that associated with the container. Releasing the lease allows another client
-        to immediately acquire the lease for the container as soon as the release is complete. 
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Release,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   None,  # proposed_lease_id
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
-    def break_container_lease(
-            self, container_name, lease_break_period=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Break the lease, if the container has an active lease. Once a lease is
-        broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired. 
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_break_period:
-            This is the proposed duration, in seconds, that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Break,
-                                           None,  # lease_id
-                                           None,  # lease_duration
-                                           lease_break_period,
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['time']
-
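-    # Usage sketch (illustrative only): breaking returns the approximate
-    # number of seconds remaining before a new lease can be acquired:
-    #
-    #   remaining = service.break_container_lease('mycontainer',
-    #                                             lease_break_period=10)
-    #   # only break and release are permitted until `remaining` elapses
-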
-    def change_container_lease(
-            self, container_name, lease_id, proposed_lease_id,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Change the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Change,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   proposed_lease_id,
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
-    def list_blobs(self, container_name, prefix=None, num_results=None, include=None,
-                   delimiter=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all blobs have been returned or num_results is reached.
-
-        If num_results is specified and the container has more than that number of 
-        blobs, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param ~azure.storage.blob.models.Include include:
-            Specifies one or more additional datasets to include in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
-            result list that acts as a placeholder for all blobs whose names begin
-            with the same substring up to the appearance of the delimiter character.
-            The delimiter may be a single character or a string.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (container_name,)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'delimiter': delimiter, 'timeout': timeout,
-                  '_context': operation_context}
-        resp = self._list_blobs(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_blobs, args, kwargs)
-
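-    # Usage sketch (illustrative only): with a delimiter, blob names sharing
-    # a prefix up to the delimiter come back as BlobPrefix placeholders,
-    # giving a "virtual directory" style listing:
-    #
-    #   for item in service.list_blobs('mycontainer', prefix='2024/',
-    #                                  delimiter='/'):
-    #       print(item.name)  # Blob entries and BlobPrefix entries
-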
-    def _list_blobs(self, container_name, prefix=None, marker=None,
-                    max_results=None, include=None, delimiter=None, timeout=None,
-                    _context=None):
-        '''
-        Returns the list of blobs under the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str include:
-            Specifies one or more datasets to include in the
-            response. To specify more than one of these options on the URI,
-            you must separate each option with a comma. Valid values are:
-                snapshots:
-                    Specifies that snapshots should be included in the
-                    enumeration. Snapshots are listed from oldest to newest in
-                    the response.
-                metadata:
-                    Specifies that blob metadata be returned in the response.
-                uncommittedblobs:
-                    Specifies that blobs for which blocks have been uploaded,
-                    but which have not been committed using Put Block List
-                    (REST API), be included in the response.
-                copy:
-                    Version 2012-02-12 and newer. Specifies that metadata
-                    related to any current or previous Copy Blob operation
-                    should be included in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a
-            placeholder for all blobs whose names begin with the same
-            substring up to the appearance of the delimiter character. The
-            delimiter may be a single character or a string.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'delimiter': _to_str(delimiter),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context)
-
-    def get_blob_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Blob service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durable 
-        in two locations. In both locations, Azure Storage constantly maintains 
-        multiple healthy replicas of your data. The location where you read, 
-        create, update, or delete data is the primary storage account location. 
-        The primary location exists in the region you choose at the time you 
-        create an account via the Azure Management Portal, for 
-        example, North Central US. The location to which your data is replicated 
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
-    def set_blob_service_properties(
-            self, logging=None, hour_metrics=None, minute_metrics=None,
-            cors=None, target_version=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved.
-
-        :param Logging logging:
-            Groups the Azure Analytics Logging settings.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param string target_version:
-            Indicates the default version to use for requests if an incoming 
-            request's version is not specified. 
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version))
-
-        self._perform_request(request)
-
-    def get_blob_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service properties.
-        :rtype:
-            :class:`~azure.storage.common.models.ServiceProperties` with an attached
-            target_version property
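-
-        Example (a minimal sketch; ``service`` is an assumed client
-        instance)::
-
-            props = service.get_blob_service_properties()
-            print(props.target_version)
-            print(props.logging.read, props.hour_metrics.enabled)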
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def get_blob_properties(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-        Returns :class:`~azure.storage.blob.models.Blob`
-        with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: a blob object including properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
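-
-        Example (a minimal sketch; ``service`` is an assumed client instance
-        and the container/blob names are hypothetical)::
-
-            blob = service.get_blob_properties('mycontainer', 'myblob')
-            print(blob.properties.content_length)
-            print(blob.metadata)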
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_blob, [blob_name, snapshot])
-
-    def set_blob_properties(
-            self, container_name, blob_name, content_settings=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Sets system properties on the blob. If one property is set in the
-        content_settings, all properties will be overridden.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
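-
-        Example (a minimal sketch; ``service`` is an assumed client instance
-        and ``ContentSettings`` is assumed to come from the accompanying blob
-        models module). Because a partially filled ContentSettings overwrites
-        every content property, set all the fields you want to keep::
-
-            settings = ContentSettings(content_type='application/json',
-                                       cache_control='no-cache')
-            service.set_blob_properties('mycontainer', 'myblob',
-                                        content_settings=settings)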
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def exists(self, container_name, blob_name=None, snapshot=None, timeout=None):
-        '''
-        Returns a boolean indicating whether the container exists (if
-        blob_name is None) or whether the blob exists.
-
-        :param str container_name:
-            Name of a container.
-        :param str blob_name:
-            Name of a blob. If None, the container will be checked for existence.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the snapshot.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
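-
-        Example (a minimal sketch; ``service`` is an assumed client
-        instance)::
-
-            if service.exists('mycontainer'):
-                blob_present = service.exists('mycontainer', 'myblob')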
-        '''
-        _validate_not_none('container_name', container_name)
-        try:
-            if blob_name is None:
-                self.get_container_properties(container_name, timeout=timeout)
-            else:
-                self.get_blob_properties(container_name, blob_name, snapshot=snapshot, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def _get_blob(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            _context=None):
-        '''
-        Downloads a blob's content, metadata, and properties. You can also
-        call this API to read a snapshot. You can specify a range if you don't
-        need to download the blob in its entirety. If no range is specified,
-        the full blob will be downloaded.
-
-        See get_blob_to_* for high-level functions that handle the download
-        of large blobs with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_decryption_required(self.require_encryption,
-                                      self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        start_offset, end_offset = 0, 0
-        if self.key_encryption_key is not None or self.key_resolver_function is not None:
-            if start_range is not None:
-                # Align the start of the range along a 16 byte block
-                start_offset = start_range % 16
-                start_range -= start_offset
-
-                # Include an extra 16 bytes for the IV if necessary
-                # Because of the previous offsetting, start_range will always
-                # be a multiple of 16.
-                if start_range > 0:
-                    start_offset += 16
-                    start_range -= 16
-
-            if end_range is not None:
-                # Align the end of the range along a 16 byte block
-                end_offset = 15 - (end_range % 16)
-                end_range += end_offset
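-
-            # Worked example of the alignment above (illustrative numbers):
-            # for start_range=100, end_range=200 the request is widened to
-            # bytes 80-207. start_offset becomes 20 (4 bytes of block
-            # alignment plus 16 bytes for the preceding block used as the IV)
-            # and end_offset becomes 7. After decryption, those offsets are
-            # used to trim the payload back to the requested 100-200.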
-
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_blob,
-                                     [blob_name, snapshot, validate_content, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function,
-                                      start_offset, end_offset],
-                                     operation_context=_context)
-
-    def get_blob_to_path(
-            self, container_name, blob_name, file_path, open_mode='wb',
-            snapshot=None, start_range=None, end_range=None,
-            validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Downloads a blob to a file path, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str file_path:
-            Path of file to write out to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an
-            append-only open_mode prevents parallel download, so
-            max_connections must be set to 1 if such a mode is used.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bit flips on the
-            wire when using http instead of https, as https (the default)
-            already validates content. Note that the service will only return
-            transactional MD5s for chunks 4MB or less, so the first get request
-            will be of size self.MAX_CHUNK_GET_SIZE instead of
-            self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE is set to
-            greater than 4MB, an error will be thrown. As computing the MD5
-            takes processing time and more requests will need to be made due to
-            the reduced chunk size, there may be some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total
-            is the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire
-            blob, the method returns at this point. If it is not, it will
-            download the remaining data in parallel using a number of threads
-            equal to max_connections. Each chunk will be of size
-            self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request
-            will be done. This is not generally recommended but available if
-            very few threads should be used, network requests are very
-            expensive, or a non-seekable stream prevents parallel download.
-            This may also be useful if many blobs are expected to be empty, as
-            an extra request is required for empty blobs if max_connections is
-            greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
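-
-        Example (a minimal sketch; ``service`` is an assumed client instance
-        and the paths/names are hypothetical)::
-
-            def report(current, total):
-                print('{0} of {1} bytes'.format(current, total))
-
-            blob = service.get_blob_to_path('mycontainer', 'myblob',
-                                            '/tmp/myblob.bin',
-                                            progress_callback=report)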
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            blob = self.get_blob_to_stream(
-                container_name,
-                blob_name,
-                stream,
-                snapshot,
-                start_range,
-                end_range,
-                validate_content,
-                progress_callback,
-                max_connections,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout)
-
-        return blob
-
-    def get_blob_to_stream(
-            self, container_name, blob_name, stream, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-
-        '''
-        Downloads a blob to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param io.IOBase stream:
-            Opened stream to write to.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bit flips on the
-            wire when using http instead of https, as https (the default)
-            already validates content. Note that the service will only return
-            transactional MD5s for chunks 4MB or less, so the first get request
-            will be of size self.MAX_CHUNK_GET_SIZE instead of
-            self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE is set to
-            greater than 4MB, an error will be thrown. As computing the MD5
-            takes processing time and more requests will need to be made due to
-            the reduced chunk size, there may be some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total
-            is the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire
-            blob, the method returns at this point. If it is not, it will
-            download the remaining data in parallel using a number of threads
-            equal to max_connections. Each chunk will be of size
-            self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request
-            will be done. This is not generally recommended but available if
-            very few threads should be used, network requests are very
-            expensive, or a non-seekable stream prevents parallel download.
-            This may also be useful if many blobs are expected to be empty, as
-            an extra request is required for empty blobs if max_connections is
-            greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
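-
-        Example (a minimal sketch; ``service`` is an assumed client instance).
-        This downloads only the first 512 bytes into an in-memory stream::
-
-            import io
-
-            stream = io.BytesIO()
-            blob = service.get_blob_to_stream('mycontainer', 'myblob', stream,
-                                              start_range=0, end_range=511)
-            first_512_bytes = stream.getvalue()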
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        # If the user explicitly sets max_connections to 1, do a single shot download
-        if max_connections == 1:
-            blob = self._get_blob(container_name,
-                                  blob_name,
-                                  snapshot,
-                                  start_range=start_range,
-                                  end_range=end_range,
-                                  validate_content=validate_content,
-                                  lease_id=lease_id,
-                                  if_modified_since=if_modified_since,
-                                  if_unmodified_since=if_unmodified_since,
-                                  if_match=if_match,
-                                  if_none_match=if_none_match,
-                                  timeout=timeout)
-
-            # Set the download size
-            download_size = blob.properties.content_length
-
-        # If max_connections is greater than 1, do the first get to establish the 
-        # size of the blob and get the first segment of data
-        else:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-            # The service only provides transactional MD5s for chunks under 4MB.           
-            # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first 
-            # chunk so a transactional MD5 can be retrieved.
-            first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-            initial_request_start = start_range if start_range else 0
-
-            if end_range and end_range - start_range < first_get_size:
-                initial_request_end = end_range
-            else:
-                initial_request_end = initial_request_start + first_get_size - 1
-
-            # Send a context object to make sure we always retry to the initial location
-            operation_context = _OperationContext(location_lock=True)
-            try:
-                blob = self._get_blob(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      start_range=initial_request_start,
-                                      end_range=initial_request_end,
-                                      validate_content=validate_content,
-                                      lease_id=lease_id,
-                                      if_modified_since=if_modified_since,
-                                      if_unmodified_since=if_unmodified_since,
-                                      if_match=if_match,
-                                      if_none_match=if_none_match,
-                                      timeout=timeout,
-                                      _context=operation_context)
-
-                # Parse the total blob size and adjust the download size if ranges 
-                # were specified
-                blob_size = _parse_length_from_content_range(blob.properties.content_range)
-                if end_range:
-                    # Use the end_range unless it is over the end of the blob
-                    download_size = min(blob_size, end_range - start_range + 1)
-                elif start_range:
-                    download_size = blob_size - start_range
-                else:
-                    download_size = blob_size
-            except AzureHttpError as ex:
-                if not start_range and ex.status_code == 416:
-                    # Get range will fail on an empty blob. If the user did not 
-                    # request a range, do a regular get request in order to get 
-                    # any properties.
-                    blob = self._get_blob(container_name,
-                                          blob_name,
-                                          snapshot,
-                                          validate_content=validate_content,
-                                          lease_id=lease_id,
-                                          if_modified_since=if_modified_since,
-                                          if_unmodified_since=if_unmodified_since,
-                                          if_match=if_match,
-                                          if_none_match=if_none_match,
-                                          timeout=timeout,
-                                          _context=operation_context)
-
-                    # Set the download size to empty
-                    download_size = 0
-                else:
-                    raise ex
-
-        # Mark the first progress chunk. If the blob is small or this is a single 
-        # shot download, this is the only call
-        if progress_callback:
-            progress_callback(blob.properties.content_length, download_size)
-
-        # Write the content to the user stream  
-        # Clear blob content since output has been written to user stream   
-        if blob.content is not None:
-            stream.write(blob.content)
-            blob.content = None
-
-        # If the blob is small or single shot download was used, the download is 
-        # complete at this point. If blob size is large, use parallel download.
-        if blob.properties.content_length != download_size:
-            # Lock on the etag. This can be overridden by the user by specifying '*'
-            if_match = if_match if if_match is not None else blob.properties.etag
-
-            end_blob = blob_size
-            if end_range:
-                # Use the end_range unless it is over the end of the blob
-                end_blob = min(blob_size, end_range + 1)
-
-            _download_blob_chunks(
-                self,
-                container_name,
-                blob_name,
-                snapshot,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_blob,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout,
-                operation_context
-            )
-
-            # Set the content length to the download size instead of the size of 
-            # the last range
-            blob.properties.content_length = download_size
-
-            # Overwrite the content range to the user requested range
-            blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead 
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            blob.properties.content_md5 = None
-
-        return blob
-
-    def get_blob_to_bytes(
-            self, container_name, blob_name, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bit flips on the
-            wire when using http instead of https, as https (the default)
-            already validates content. Note that the service will only return
-            transactional MD5s for chunks 4MB or less, so the first get request
-            will be of size self.MAX_CHUNK_GET_SIZE instead of
-            self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE is set to
-            greater than 4MB, an error will be thrown. As computing the MD5
-            takes processing time and more requests will need to be made due to
-            the reduced chunk size, there may be some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total
-            is the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire
-            blob, the method returns at this point. If it is not, it will
-            download the remaining data in parallel using a number of threads
-            equal to max_connections. Each chunk will be of size
-            self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request
-            will be done. This is not generally recommended but available if
-            very few threads should be used, network requests are very
-            expensive, or a non-seekable stream prevents parallel download.
-            This may also be useful if many blobs are expected to be empty, as
-            an extra request is required for empty blobs if max_connections is
-            greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
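-
-        Example (a minimal sketch; ``service`` is an assumed client
-        instance)::
-
-            blob = service.get_blob_to_bytes('mycontainer', 'myblob')
-            data = blob.content  # raw bytes of the blob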
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        stream = BytesIO()
-        blob = self.get_blob_to_stream(
-            container_name,
-            blob_name,
-            stream,
-            snapshot,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            lease_id,
-            if_modified_since,
-            if_unmodified_since,
-            if_match,
-            if_none_match,
-            timeout)
-
-        blob.content = stream.getvalue()
-        return blob
-
-    def get_blob_to_text(
-            self, container_name, blob_name, encoding='utf-8', snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str encoding:
-            Python encoding to use when decoding the blob data.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bit flips on the
-            wire when using http instead of https, as https (the default)
-            already validates content. Note that the service will only return
-            transactional MD5s for chunks 4MB or less, so the first get request
-            will be of size self.MAX_CHUNK_GET_SIZE instead of
-            self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE is set to
-            greater than 4MB, an error will be thrown. As computing the MD5
-            takes processing time and more requests will need to be made due to
-            the reduced chunk size, there may be some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total
-            is the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire
-            blob, the method returns at this point. If it is not, it will
-            download the remaining data in parallel using a number of threads
-            equal to max_connections. Each chunk will be of size
-            self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request
-            will be done. This is not generally recommended but available if
-            very few threads should be used, network requests are very
-            expensive, or a non-seekable stream prevents parallel download.
-            This may also be useful if many blobs are expected to be empty, as
-            an extra request is required for empty blobs if max_connections is
-            greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
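-
-        Example (a minimal sketch; ``service`` is an assumed client
-        instance)::
-
-            blob = service.get_blob_to_text('mycontainer', 'myblob',
-                                            encoding='utf-8')
-            text = blob.content  # decoded unicode text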
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('encoding', encoding)
-
-        blob = self.get_blob_to_bytes(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      start_range,
-                                      end_range,
-                                      validate_content,
-                                      progress_callback,
-                                      max_connections,
-                                      lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        blob.content = blob.content.decode(encoding)
-        return blob
-
-    def get_blob_metadata(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified blob or snapshot.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the blob metadata name, value pairs.
-        :rtype: dict(str, str)
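-
-        Example (a minimal sketch; ``service`` is an assumed client
-        instance)::
-
-            metadata = service.get_blob_metadata('mycontainer', 'myblob')
-            for name, value in metadata.items():
-                print(name, value)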
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_blob_metadata(self, container_name, blob_name,
-                          metadata=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None,
-                          if_match=None, if_none_match=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified blob as one or more
-        name-value pairs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
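-
-        Example (a minimal sketch; ``service`` is an assumed client instance).
-        Each call replaces all existing metadata, and calling with no metadata
-        clears it::
-
-            service.set_blob_metadata('mycontainer', 'myblob',
-                                      metadata={'category': 'images'})
-            service.set_blob_metadata('mycontainer', 'myblob')  # remove all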
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _lease_blob_impl(self, container_name, blob_name,
-                         lease_action, lease_id,
-                         lease_duration, lease_break_period,
-                         proposed_lease_id, if_modified_since,
-                         if_unmodified_since, if_match, if_none_match, timeout=None):
-        '''
-        Establishes and manages a lease on a blob for write and delete operations.
-        The Lease Blob operation can be called in one of five modes:
-            Acquire, to request a new lease.
-            Renew, to renew an existing lease.
-            Change, to change the ID of an existing lease.
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the blob.
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_action:
-            One of the _LeaseActions values: acquire|renew|release|break|change.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for acquire, required for change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_blob_lease(self, container_name, blob_name,
-                           lease_duration=-1,
-                           proposed_lease_id=None,
-                           if_modified_since=None,
-                           if_unmodified_since=None,
-                           if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Requests a new lease. If the blob does not have an active lease, the Blob
-        service creates a lease on the blob and returns a new lease ID.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Acquire,
-                                      None,  # lease_id
-                                      lease_duration,
-                                      None,  # lease_break_period
-                                      proposed_lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
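# A minimal usage sketch for acquire_blob_lease, assuming the vendored
# track1 client and placeholder account credentials and names:
from azure.multiapi.storage.v2017_04_17.blob import BlockBlobService

svc = BlockBlobService(account_name='myaccount', account_key='<account-key>')
# A finite lease must last 15-60 seconds; -1 (the default) never expires.
lease_id = svc.acquire_blob_lease('mycontainer', 'myblob', lease_duration=30)
print(lease_id)  # GUID string identifying the new lease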
-    def renew_blob_lease(self, container_name, blob_name,
-                         lease_id, if_modified_since=None,
-                         if_unmodified_since=None, if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified on
-        the request matches that associated with the blob. Note that the lease may
-        be renewed even if it has expired as long as the blob has not been modified
-        or leased again since the expiration of that lease. When you renew a lease,
-        the lease duration clock resets. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Renew,
-                                      lease_id,
-                                      None,  # lease_duration
-                                      None,  # lease_break_period
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
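# Sketch of renew_blob_lease, reusing the placeholder svc client from the
# acquire sketch: renewing resets the duration clock and keeps the same ID,
# and works even on an expired lease if the blob was not modified or
# re-leased in the meantime.
renewed_id = svc.renew_blob_lease('mycontainer', 'myblob', lease_id)
assert renewed_id == lease_id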
-    def release_blob_lease(self, container_name, blob_name,
-                           lease_id, if_modified_since=None,
-                           if_unmodified_since=None, if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Releases the lease. The lease may be released if the lease ID specified on the
-        request matches that associated with the blob. Releasing the lease allows another
-        client to immediately acquire the lease for the blob as soon as the release is complete. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Release,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              None,  # proposed_lease_id
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
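# Sketch of the acquire/release pattern with the placeholder svc client:
# releasing in a finally block keeps the blob from staying locked for the
# full lease duration if the guarded work raises.
lease_id = svc.acquire_blob_lease('mycontainer', 'myblob', lease_duration=60)
try:
    svc.set_blob_metadata('mycontainer', 'myblob', {'stage': 'v2'}, lease_id=lease_id)
finally:
    svc.release_blob_lease('mycontainer', 'myblob', lease_id)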
-    def break_blob_lease(self, container_name, blob_name,
-                         lease_break_period=None,
-                         if_modified_since=None,
-                         if_unmodified_since=None,
-                         if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Breaks the lease, if the blob has an active lease. Once a lease is broken,
-        it cannot be renewed. Any authorized request can break the lease; the request
-        is not required to specify a matching lease ID. When a lease is broken,
-        the lease break period is allowed to elapse, during which time no lease operation
-        except break and release can be performed on the blob. When a lease is successfully
-        broken, the response indicates the interval in seconds until a new lease can be acquired. 
-
-        A lease that has been broken can also be released, in which case another client may
-        immediately acquire the lease on the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Break,
-                                      None,  # lease_id
-                                      None,  # lease_duration
-                                      lease_break_period,
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['time']
-
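# Sketch of break_blob_lease with the placeholder svc client: no lease ID is
# needed, and the return value is the approximate time until a new lease can
# be acquired.
seconds_left = svc.break_blob_lease('mycontainer', 'myblob', lease_break_period=10)
print('a new lease can be acquired in ~%d seconds' % seconds_left)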
-    def change_blob_lease(self, container_name, blob_name,
-                          lease_id,
-                          proposed_lease_id,
-                          if_modified_since=None,
-                          if_unmodified_since=None,
-                          if_match=None,
-                          if_none_match=None, timeout=None):
-        '''
-        Changes the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Change,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              proposed_lease_id,
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
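# Sketch of change_blob_lease with the placeholder svc client: rotating an
# active lease to a caller-chosen GUID.
import uuid

new_id = str(uuid.uuid4())
svc.change_blob_lease('mycontainer', 'myblob', lease_id, proposed_lease_id=new_id)
lease_id = new_id  # later calls must present the new ID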
-    def snapshot_blob(self, container_name, blob_name,
-                      metadata=None, if_modified_since=None,
-                      if_unmodified_since=None, if_match=None,
-                      if_none_match=None, lease_id=None, timeout=None):
-        '''
-        Creates a read-only snapshot of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Specifies a user-defined name-value pair associated with the blob.
-            If no name-value pairs are specified, the operation will copy the
-            base blob metadata to the snapshot. If one or more name-value pairs
-            are specified, the snapshot is created with the specified metadata,
-            and metadata is not copied from the base blob.
-        :type metadata: dict(str, str)
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_blob, [blob_name])
-
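# Sketch of snapshot_blob with the placeholder svc client. The parser above
# (_parse_snapshot_blob) attaches the opaque DateTime naming the snapshot;
# the attribute name below follows the track1 Blob model.
snap = svc.snapshot_blob('mycontainer', 'myblob', metadata={'tag': 'pre-update'})
snapshot_time = snap.snapshot  # pass this as the snapshot= argument later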
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation may be a block blob, an append blob, 
-        or a page blob. If the destination blob already exists, it must be of the 
-        same blob type as the source blob. Any existing destination blob will be 
-        overwritten. The destination blob cannot be modified while a copy operation 
-        is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page 
-        blob of the source blob's length, initially containing all zeroes. Then 
-        the source page ranges are enumerated, and non-empty ranges are copied. 
-
-        For a block blob or an append blob, the Blob service creates a committed 
-        blob of zero length before returning from this operation. When copying 
-        from a block blob, all committed blocks and their block IDs are copied. 
-        Uncommitted blocks are not copied. At the end of the copy operation, the 
-        destination blob will have the same committed block count as the source.
-
-        When copying from an append blob, all committed blocks are copied. At the 
-        end of the copy operation, the destination blob will have the same committed 
-        block count as the source.
-
-        For all blob types, you can call get_blob_properties on the destination 
-        blob to check the status of the copy operation. The final blob will be 
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination blob. If one or more name-value 
-            pairs are specified, the destination blob is created with the specified 
-            metadata, and metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.  
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               None,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False)
-
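# Sketch of copy_blob with the placeholder svc client: start the copy, then
# poll the destination until the service reports a terminal state (the
# track1 CopyProperties status values are 'pending', 'success', 'aborted',
# and 'failed').
import time

source_url = 'https://myaccount.blob.core.windows.net/mycontainer/myblob'
copy = svc.copy_blob('backups', 'myblob', source_url)
while copy.status == 'pending':
    time.sleep(5)
    copy = svc.get_blob_properties('backups', 'myblob').properties.copy
print(copy.status)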
-    def _copy_blob(self, container_name, blob_name, copy_source,
-                   metadata=None,
-                   premium_page_blob_tier=None,
-                   source_if_modified_since=None,
-                   source_if_unmodified_since=None,
-                   source_if_match=None, source_if_none_match=None,
-                   destination_if_modified_since=None,
-                   destination_if_unmodified_since=None,
-                   destination_if_match=None,
-                   destination_if_none_match=None,
-                   destination_lease_id=None,
-                   source_lease_id=None, timeout=None,
-                   incremental_copy=False):
-        '''
-        See copy_blob for more details. This helper method allows for
-        standard copies as well as incremental copies, which are only
-        supported for page blobs.
-        :param bool incremental_copy:
-            Performs an incremental copy; only supported for page blobs.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source', copy_source)
-
-        if copy_source.startswith('/'):
-            # Backwards compatibility for earlier versions of the SDK where
-            # the copy source can be in the following formats:
-            # - Blob in named container:
-            #     /accountName/containerName/blobName
-            # - Snapshot in named container:
-            #     /accountName/containerName/blobName?snapshot=<DateTime>
-            # - Blob in root container:
-            #     /accountName/blobName
-            # - Snapshot in root container:
-            #     /accountName/blobName?snapshot=<DateTime>
-            account, _, source = \
-                copy_source.partition('/')[2].partition('/')
-            copy_source = self.protocol + '://' + \
-                          self.primary_endpoint + '/' + source
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-
-        if incremental_copy:
-            request.query = {
-                'comp': 'incrementalcopy',
-                'timeout': _int_to_str(timeout),
-            }
-        else:
-            request.query = {'timeout': _int_to_str(timeout)}
-
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-            'x-ms-source-if-modified-since': _to_str(source_if_modified_since),
-            'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since),
-            'x-ms-source-if-match': _to_str(source_if_match),
-            'x-ms-source-if-none-match': _to_str(source_if_none_match),
-            'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since),
-            'If-Match': _to_str(destination_if_match),
-            'If-None-Match': _to_str(destination_if_none_match),
-            'x-ms-lease-id': _to_str(destination_lease_id),
-            'x-ms-source-lease-id': _to_str(source_lease_id),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [BlobProperties]).copy
-
-    def abort_copy_blob(self, container_name, blob_name, copy_id,
-                        lease_id=None, timeout=None):
-        '''
-         Aborts a pending copy_blob operation, and leaves a destination blob
-         with zero length and full metadata.
-
-         :param str container_name:
-             Name of destination container.
-         :param str blob_name:
-             Name of destination blob.
-         :param str copy_id:
-             Copy identifier provided in the copy.id of the original
-             copy_blob operation.
-         :param str lease_id:
-             Required if the destination blob has an active infinite lease.
-         :param int timeout:
-             The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
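# Sketch of abort_copy_blob with the placeholder svc client: aborting is
# only meaningful while the copy is still pending, and it leaves the
# destination as a zero-length blob with full metadata.
copy = svc.copy_blob('backups', 'bigblob', source_url)
if copy.status == 'pending':
    svc.abort_copy_blob('backups', 'bigblob', copy.id)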
-    def delete_blob(self, container_name, blob_name, snapshot=None,
-                    lease_id=None, delete_snapshots=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Marks the specified blob or snapshot for deletion.
-        The blob is later deleted during garbage collection.
-
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the Delete
-        Blob operation.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to delete.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots:
-            Required if the blob has associated snapshots.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-delete-snapshots': _to_str(delete_snapshots),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout)
-        }
-
-        self._perform_request(request)
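# Sketch of delete_blob with the placeholder svc client: a blob with
# snapshots cannot be deleted alone, so pass the DeleteSnapshot flag (a
# track1 model assumed to live in the vendored models module).
from azure.multiapi.storage.v2017_04_17.blob.models import DeleteSnapshot

svc.delete_blob('mycontainer', 'myblob', delete_snapshots=DeleteSnapshot.Include)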
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/blockblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/blockblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/blockblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/blockblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1012 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from io import (
-    BytesIO
-)
-from os import (
-    path,
-)
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_VALUE_SHOULD_BE_STREAM
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _get_data_bytes_or_stream_only,
-    _add_metadata_headers,
-)
-from ..common._serialization import (
-    _len_plus
-)
-from ._deserialization import (
-    _convert_xml_to_block_list,
-    _parse_base_properties,
-)
-from ._encryption import (
-    _encrypt_blob,
-    _generate_blob_encryption_data,
-)
-from ._serialization import (
-    _convert_block_list_to_xml,
-    _get_path,
-)
-from ._upload_chunking import (
-    _BlockBlobChunkUploader,
-    _upload_blob_chunks,
-    _upload_blob_substream_blocks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-)
-
-
-class BlockBlobService(BaseBlobService):
-    '''
-    Block blobs let you upload large blobs efficiently. Block blobs are composed
-    of blocks, each of which is identified by a block ID. You create or modify a
-    block blob by writing a set of blocks and committing them by their block IDs.
-    Each block can be a different size, up to a maximum of 4 MB, and a block blob
-    can include up to 50,000 blocks. The maximum size of a block blob is therefore
-    slightly more than 195 GB (4 MB X 50,000 blocks). If you are writing a block
-    blob that is no more than 64 MB in size, you can upload it in its entirety with
-    a single write operation; see create_blob_from_bytes.
-
-    :ivar int MAX_SINGLE_PUT_SIZE:
-        The largest size upload supported in a single put call. This is used by
-        the create_blob_from_* methods if the content length is known and is less
-        than this value.
-    :ivar int MAX_BLOCK_SIZE:
-        The size of the blocks put by create_blob_from_* methods if the content
-        length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks
-        may be put. The maximum block size the service supports is 100MB.
-    :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD:
-        The minimum block size at which the memory-optimized block upload
-        algorithm is considered. This algorithm is only applicable to the create_blob_from_file and
-        create_blob_from_stream methods and will prevent the full buffering of blocks.
-        In addition to the block size, ContentMD5 validation and Encryption must be disabled as
-        these options require the blocks to be buffered.
-    '''
-
-    MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-    MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1
-
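# A worked check of the size math in the docstring above, using the class
# constants: 50,000 blocks at the 4 MB default block size is just over
# 195 GB.
max_blob_bytes = 50000 * BlockBlobService.MAX_BLOCK_SIZE
print(max_blob_bytes / (1024.0 ** 3))  # ~195.3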
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given, or if a custom
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-            If neither account key or sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign. If neither are
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will
-            override all other parameters besides connection string and request
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        self.blob_type = _BlobTypes.BlockBlob
-        super(BlockBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout)
-
-    def put_block(self, container_name, blob_name, block, block_id,
-                  validate_content=False, lease_id=None, timeout=None):
-        '''
-        Creates a new block to be committed as part of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block:
-            Content of the block.
-        :type block: io.IOBase or bytes
-        :param str block_id:
-            A valid Base64 string value that identifies the block. Prior to
-            encoding, the string must be less than or equal to 64 bytes in size.
-            For a given blob, the length of the value specified for the blockid
-            parameter must be the same size for each block. Note that the Base64
-            string must be URL-encoded.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        self._put_block(
-            container_name,
-            blob_name,
-            block,
-            block_id,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            timeout=timeout
-        )
-
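# Sketch of staging blocks with the placeholder svc client. Block IDs are
# Base64-encoded fixed-width strings, as the docstring above requires, and
# must be used consistently across put_block and put_block_list.
import base64

def _bid(i):
    return base64.b64encode(('%06d' % i).encode('utf-8')).decode('utf-8')

svc.put_block('mycontainer', 'staged-blob', b'a' * 1024, _bid(0))
svc.put_block('mycontainer', 'staged-blob', b'b' * 1024, _bid(1))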
-    def put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Writes a blob by specifying the list of block IDs that make up the blob.
-        In order to be written as part of a blob, a block must have been
-        successfully written to the server in a prior Put Block operation.
-
-        You can call Put Block List to update a blob by uploading only those
-        blocks that have changed, then committing the new and existing blocks
-        together. You can do this by specifying whether to commit a block from
-        the committed block list or from the uncommitted block list, or to commit
-        the most recently uploaded version of the block, whichever list it may
-        belong to.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block_list:
-            A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block IDs and block state.
-        :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`)
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block list content. The storage
-            service checks the hash of the block list content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this check is associated with
-            the block list content, and not with the content of the blob itself.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._put_block_list(
-            container_name,
-            blob_name,
-            block_list,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
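# Sketch of committing the blocks staged in the put_block sketch; BlobBlock
# is assumed to come from the vendored models module, and the IDs must match
# those passed to put_block.
from azure.multiapi.storage.v2017_04_17.blob.models import BlobBlock

block_list = [BlobBlock(id=_bid(0)), BlobBlock(id=_bid(1))]
props = svc.put_block_list('mycontainer', 'staged-blob', block_list)
print(props.etag, props.last_modified)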
-    def get_block_list(self, container_name, blob_name, snapshot=None,
-                       block_list_type=None, lease_id=None, timeout=None):
-        '''
-        Retrieves the list of blocks that have been uploaded as part of a
-        block blob. There are two block lists maintained for a blob:
-            Committed Block List:
-                The list of blocks that have been successfully committed to a
-                given blob with Put Block List.
-            Uncommitted Block List:
-                The list of blocks that have been uploaded for a blob using
-                Put Block, but that have not yet been committed. These blocks
-                are stored in Azure in association with a blob, but do not yet
-                form part of the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str block_list_type:
-            Specifies whether to return the list of committed blocks, the list
-            of uncommitted blocks, or both lists together. Valid values are:
-            committed, uncommitted, or all.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The list of committed and/or uncommitted blocks for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.BlobBlockList`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'snapshot': _to_str(snapshot),
-            'blocklisttype': _to_str(block_list_type),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_block_list)
-
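A hedged usage sketch for get_block_list; the `service` client instance and the
container/blob names below are illustrative placeholders, not part of this module:

    # Fetch both lists and inspect staged vs. committed blocks.
    block_list = service.get_block_list(
        'mycontainer', 'myblob', block_list_type='all')
    for block in block_list.committed_blocks:
        print('committed', block.id, block.size)
    for block in block_list.uncommitted_blocks:
        print('uncommitted', block.id, block.size)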
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
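A minimal usage sketch for create_blob_from_path; `service` is a placeholder
client, and the ContentSettings import path assumes this package version's
blob models module:

    from azure.multiapi.storage.v2017_04_17.blob.models import ContentSettings

    props = service.create_blob_from_path(
        'mycontainer', 'report.csv', '/tmp/report.csv',
        content_settings=ContentSettings(content_type='text/csv'),
        max_connections=4,  # parallel chunk upload once the blob exceeds 64MB
        progress_callback=lambda current, total: print(current, total))
    print(props.etag, props.last_modified)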
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, use_byte_buffer=False):
-        '''
-        Creates a new blob from a file/stream, or updates the content of
-        an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB. Note that parallel upload requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :param bool use_byte_buffer:
-            If True, this will force usage of the original full block buffering upload path.
-            By default, this value is False and will employ a memory-efficient,
-            streaming upload algorithm under the following conditions:
-            The provided stream is seekable, 'require_encryption' is False, and
-            MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD.
-            One should consider the drawbacks of using this approach. In order to achieve
-            memory-efficiency, an IOBase stream or file-like object is segmented into logical blocks
-            using a SubStream wrapper. In order to read the correct data, each SubStream must acquire
-            a lock so that it can safely seek to the right position on the shared, underlying stream.
-            If max_connections > 1, the concurrency will result in a considerable amount of seeking on
-            the underlying stream. For the most common inputs such as a file-like stream object, seeking
-            is an inexpensive operation and this is not much of a concern. However, for other variants of streams
-            this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking
-            with your input stream.
-            The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of
-            seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        # Adjust count to include padding if we are expected to encrypt.
-        adjusted_count = count
-        if (self.key_encryption_key is not None) and (adjusted_count is not None):
-            adjusted_count += (16 - (count % 16))
-
-        # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE
-        if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE):
-            if progress_callback:
-                progress_callback(0, count)
-
-            data = stream.read(count)
-            resp = self._put_blob(
-                container_name=container_name,
-                blob_name=blob_name,
-                blob=data,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
-            if progress_callback:
-                progress_callback(count, count)
-
-            return resp
-        else:  # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls
-            cek, iv, encryption_data = None, None, None
-
-            use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \
-                                       self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \
-                                       (hasattr(stream, 'seekable') and not stream.seekable()) or \
-                                       not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
-
-            if use_original_upload_path:
-                if self.key_encryption_key:
-                    cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-                block_ids = _upload_blob_chunks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                    content_encryption_key=cek,
-                    initialization_vector=iv
-                )
-            else:
-                block_ids = _upload_blob_substream_blocks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                )
-
-            return self._put_block_list(
-                container_name=container_name,
-                blob_name=blob_name,
-                block_list=block_ids,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                encryption_data=encryption_data
-            )
-
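The use_original_upload_path test above chooses between full-block buffering
and the memory-efficient substream path; a condensed, behavior-equivalent
restatement (the standalone function and its parameter names are illustrative):

    def uses_buffered_path(stream, use_byte_buffer, validate_content,
                           require_encryption, max_block_size, threshold):
        # Any condition that prevents safe concurrent seeking, or that needs
        # whole blocks in memory (MD5 validation, encryption), forces the
        # original full-buffer upload path.
        return (use_byte_buffer
                or validate_content
                or require_encryption
                or max_block_size < threshold
                or (hasattr(stream, 'seekable') and not stream.seekable())
                or not hasattr(stream, 'seek')
                or not hasattr(stream, 'tell'))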
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            use_byte_buffer=True
-        )
-
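A worked example of the index/count handling above, using illustrative values:

    from io import BytesIO

    blob = b'0123456789'
    index, count = 4, None
    if count is None or count < 0:
        count = len(blob) - index      # -> 6
    stream = BytesIO(blob)
    stream.seek(index)                 # upload starts at offset 4
    assert stream.read(count) == b'456789'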
-    def create_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from str/unicode, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.create_blob_from_bytes(
-            container_name=container_name,
-            blob_name=blob_name,
-            blob=text,
-            index=0,
-            count=len(text),
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
-
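The encode-then-delegate step above, shown in isolation (values are illustrative):

    text = u'h\xe9llo'
    encoding = 'utf-8'
    if not isinstance(text, bytes):
        text = text.encode(encoding)   # bytes inputs pass through unchanged
    assert text == b'h\xc3\xa9llo'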
-    def set_standard_blob_tier(
-            self, container_name, blob_name, standard_blob_tier, timeout=None):
-        '''
-        Sets the block blob tier on the blob. This API is only supported for block blobs on standard storage accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('standard_blob_tier', standard_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(standard_blob_tier)
-        }
-
-        self._perform_request(request)
-
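A hedged usage sketch; `service` is a placeholder, and because the tier is
passed through _to_str, either a plain string or a StandardBlobTier value from
this package version's models should work:

    # Move a block blob to the Cool access tier on a standard account.
    service.set_standard_blob_tier('mycontainer', 'myblob', 'Cool')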
-    # -----Helper methods------------------------------------
-    def _put_blob(self, container_name, blob_name, blob, content_settings=None,
-                  metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-                  if_unmodified_since=None, if_match=None, if_none_match=None,
-                  timeout=None):
-        '''
-        Creates a blob or updates an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as bytes (size < 64MB). For larger size, you
-            must call put_block and put_block_list to set content of blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the blob content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the new Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        blob = _get_data_bytes_only('blob', blob)
-        if self.key_encryption_key:
-            encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key)
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-        request.body = blob
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _put_block(self, container_name, blob_name, block, block_id,
-                   validate_content=False, lease_id=None, timeout=None):
-        '''
-        See put_block for more details. This helper method
-        allows for encryption or other such special behavior because
-        it is safely handled by the library. These behaviors are
-        prohibited in the public version of this function.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_not_none('block_id', block_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'block',
-            'blockid': _encode_base64(_to_str(block_id)),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        request.body = _get_data_bytes_or_stream_only('block', block)
-        if hasattr(request.body, 'read'):
-            if _len_plus(request.body) is None:
-                try:
-                    data = b''
-                    for chunk in iter(lambda: request.body.read(4096), b""):
-                        data += chunk
-                    request.body = data
-                except AttributeError:
-                    raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body'))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
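Note that _put_block base64-encodes the blockid itself (see the 'blockid'
query parameter above), so callers only need to supply raw ids that are unique
and of equal length per blob. A sketch of the usual fixed-width scheme
(the helper name is illustrative):

    def make_block_id(index):
        # Zero-padding keeps every id the same length, which the service
        # requires across all blocks of a single blob.
        return '{0:032d}'.format(index)

    block_ids = [make_block_id(i) for i in range(3)]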
-    def _put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None, encryption_data=None):
-        '''
-        See put_block_list for more details. This helper method
-        allows for encryption or other such special behavior because
-        it is safely handled by the library. These behaviors are
-        prohibited in the public version of this function.
-        :param str encryption_data:
-            A JSON formatted string containing the encryption metadata generated for this 
-            blob if it was encrypted all at once upon upload. This should only be passed
-            in by internal methods.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block_list', block_list)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        request.body = _get_request_body(
-            _convert_block_list_to_xml(block_list))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
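Taken together, these helpers back the public put_block / put_block_list pair;
a hedged sketch of the manual staged-upload flow (placeholder names, with the
BlobBlock import path assuming this package version):

    from azure.multiapi.storage.v2017_04_17.blob.models import BlobBlock

    chunks = [b'first-', b'second-', b'third']
    staged = []
    for i, chunk in enumerate(chunks):
        block_id = '{0:032d}'.format(i)
        service.put_block('mycontainer', 'myblob', chunk, block_id)  # stage
        staged.append(BlobBlock(id=block_id))
    service.put_block_list('mycontainer', 'myblob', staged)          # commit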
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/models.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,745 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Container(object):
-    '''
-    Blob container class. 
-    
-    :ivar str name: 
-        The name of the container.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the container as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list containers operation. If this parameter was specified but the 
-        container has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar ContainerProperties properties:
-        System properties for the container.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or ContainerProperties()
-        self.metadata = metadata
-
-
-class ContainerProperties(object):
-    '''
-    Blob container's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the container was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar LeaseProperties lease:
-        Stores all the lease information for the container.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.lease = LeaseProperties()
-        self.public_access = None
-
-
-class Blob(object):
-    '''
-    Blob class.
-    
-    :ivar str name:
-        Name of blob.
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    :ivar content:
-        Blob content.
-    :vartype content: str or bytes
-    :ivar BlobProperties properties:
-        Stores all the system properties for the blob.
-    :ivar metadata:
-        Name-value pairs associated with the blob as metadata.
-    '''
-
-    def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.snapshot = snapshot
-        self.content = content
-        self.properties = props or BlobProperties()
-        self.metadata = metadata
-
-
-class BlobProperties(object):
-    '''
-    Blob Properties
-    
-    :ivar str blob_type:
-        String indicating this blob's type.
-    :ivar datetime last_modified:
-        A datetime object representing the last time the blob was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire blob was requested, 
-        the length of blob in bytes. If a subset of the blob was requested, the 
-        length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client 
-        requested a subset of the blob.
-    :ivar int append_blob_committed_block_count:
-        (For Append Blobs) Number of committed blocks in the blob.
-    :ivar int page_blob_sequence_number:
-        (For Page Blobs) Sequence number for page blob used for coordinating
-        concurrent writes.
-    :ivar bool server_encrypted:
-        Set to true if the blob is encrypted on the server.
-    :ivar ~azure.storage.blob.models.CopyProperties copy:
-        Stores all the copy properties for the blob.
-    :ivar ~azure.storage.blob.models.ContentSettings content_settings:
-        Stores all the content settings for the blob.
-    :ivar ~azure.storage.blob.models.LeaseProperties lease:
-        Stores all the lease information for the blob.
-    :ivar StandardBlobTier blob_tier:
-        Indicates the access tier of the blob. The hot tier is optimized
-        for storing data that is accessed frequently. The cool storage tier
-        is optimized for storing data that is infrequently accessed and stored
-        for at least a month. The archive tier is optimized for storing
-        data that is rarely accessed and stored for at least six months
-        with flexible latency requirements.
-    :ivar datetime blob_tier_change_time:
-        Indicates when the access tier was last changed.
-    :ivar bool blob_tier_inferred:
-        Indicates whether the access tier was inferred by the service.
-        If false, it indicates that the tier was set explicitly.
-    '''
-
-    def __init__(self):
-        self.blob_type = None
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.append_blob_committed_block_count = None
-        self.page_blob_sequence_number = None
-        self.server_encrypted = None
-        self.copy = CopyProperties()
-        self.content_settings = ContentSettings()
-        self.lease = LeaseProperties()
-        self.blob_tier = None
-        self.blob_tier_change_time = None
-        self.blob_tier_inferred = False
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a blob.
-    
-    :ivar str content_type:
-        The content type specified for the blob. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If the content_encoding has previously been set
-        for the blob, that value is stored.
-    :ivar str content_language:
-        If the content_language has previously been set
-        for the blob, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the blob, that value is stored.
-    :ivar str cache_control:
-        If the cache_control has previously been set for
-        the blob, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the blob, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-blob-cache-control': _to_str(self.cache_control),
-            'x-ms-blob-content-type': _to_str(self.content_type),
-            'x-ms-blob-content-disposition': _to_str(self.content_disposition),
-            'x-ms-blob-content-md5': _to_str(self.content_md5),
-            'x-ms-blob-content-encoding': _to_str(self.content_encoding),
-            'x-ms-blob-content-language': _to_str(self.content_language),
-        }
-
-
-class CopyProperties(object):
-    '''
-    Blob Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy Blob operation where this blob
-        was the destination blob. This header does not appear if this blob has never
-        been the destination in a Copy Blob operation, or if this blob has been
-        modified after a concluded Copy Blob operation using Set Blob Properties,
-        Put Blob, or Put Block List.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source blob used in the last attempted
-        Copy Blob operation where this blob was the destination blob. This header does not
-        appear if this blob has never been the destination in a Copy Blob operation, or if
-        this blob has been modified after a concluded Copy Blob operation using
-        Set Blob Properties, Put Blob, or Put Block List.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy Blob.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy Blob operation where this blob was the destination blob. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy Blob operation where this blob was the
-        destination blob. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes the
-        cause of a fatal or non-fatal copy operation failure.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class LeaseProperties(object):
-    '''
-    Blob Lease Properties.
-    
-    :ivar str status:
-        The lease status of the blob.
-        Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the blob.
-        Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a blob is leased, specifies whether the lease is of infinite or fixed duration.
-    '''
-
-    def __init__(self):
-        self.status = None
-        self.state = None
-        self.duration = None
-
-
-class BlobPrefix(object):
-    '''
-    BlobPrefix objects may potentially be returned in the blob list when
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is 
-    used with a delimiter. Prefixes can be thought of as virtual blob directories.
-    
-    :ivar str name: The name of the blob prefix.
-    '''
-
-    def __init__(self):
-        self.name = None
-
-
-class BlobBlockState(object):
-    '''Block blob block types.'''
-
-    Committed = 'Committed'
-    '''Committed blocks.'''
-
-    Latest = 'Latest'
-    '''Latest blocks.'''
-
-    Uncommitted = 'Uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class BlobBlock(object):
-    '''
-    BlockBlob Block class.
-    
-    :ivar str id:
-        Block id.
-    :ivar str state:
-        Block state.
-        Possible values: committed|uncommitted
-    :ivar int size:
-        Block size in bytes.
-    '''
-
-    def __init__(self, id=None, state=BlobBlockState.Latest):
-        self.id = id
-        self.state = state
-
-    def _set_size(self, size):
-        self.size = size
-
-
-class BlobBlockList(object):
-    '''
-    Blob Block List class.
-   
-    :ivar committed_blocks:
-        List of committed blocks.
-    :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    :ivar uncommitted_blocks:
-        List of uncommitted blocks.
-    :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    '''
-
-    def __init__(self):
-        self.committed_blocks = list()
-        self.uncommitted_blocks = list()
-
-
-class PageRange(object):
-    '''
-    Page Range for page blob.
-    
-    :ivar int start:
-        Start of page range in bytes.
-    :ivar int end:
-        End of page range in bytes.
-    :ivar bool is_cleared:
-        Indicates if a page range is cleared or not. Only applicable
-        for the get_page_range_diff API.
-    '''
-
-    def __init__(self, start=None, end=None, is_cleared=False):
-        self.start = start
-        self.end = end
-        self.is_cleared = is_cleared
-
-
-class ResourceProperties(object):
-    '''
-    Base response for a resource request.
-    
-    :ivar str etag:
-        Opaque etag value that can be used to check if resource
-        has been modified.
-    :ivar datetime last_modified:
-        Datetime for last time resource was modified.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-
-
-class AppendBlockProperties(ResourceProperties):
-    '''
-    Response for an append block request.
-    
-    :ivar int append_offset:
-        Position to start next append.
-    :ivar int committed_block_count:
-        Number of committed append blocks.
-    '''
-
-    def __init__(self):
-        super(AppendBlockProperties, self).__init__()
-        self.append_offset = None
-        self.committed_block_count = None
-
-
-class PageBlobProperties(ResourceProperties):
-    '''
-    Response for a page request.
-    
-    :ivar int sequence_number:
-        Identifier for page blobs to help handle concurrent writes.
-    '''
-
-    def __init__(self):
-        super(PageBlobProperties, self).__init__()
-        self.sequence_number = None
-
-
-class PublicAccess(object):
-    '''
-    Specifies whether data in the container may be accessed publicly and the level of access.
-    '''
-
-    OFF = 'off'
-    '''
-    Specifies that there is no public read access for both the container and blobs within the container.
-    Clients cannot enumerate the containers within the storage account as well as the blobs within the container.
-    '''
-
-    Blob = 'blob'
-    '''
-    Specifies public read access for blobs. Blob data within this container can be read 
-    via anonymous request, but container data is not available. Clients cannot enumerate 
-    blobs within the container via anonymous request.
-    '''
-
-    Container = 'container'
-    '''
-    Specifies full public read access for container and blob data. Clients can enumerate 
-    blobs within the container via anonymous request, but cannot enumerate containers 
-    within the storage account.
-    '''
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the blob has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the base blob and all of its snapshots.
-    '''
-
-    Only = 'only'
-    '''
-    Delete only the blob's snapshots and not the blob itself.
-    '''
-
-
-class BlockListType(object):
-    '''
-    Specifies whether to return the list of committed blocks, the list of uncommitted 
-    blocks, or both lists together.
-    '''
-
-    All = 'all'
-    '''Both committed and uncommitted blocks.'''
-
-    Committed = 'committed'
-    '''Committed blocks.'''
-
-    Uncommitted = 'uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class SequenceNumberAction(object):
-    '''Sequence number actions.'''
-
-    Increment = 'increment'
-    '''
-    Increments the value of the sequence number by 1. If specifying this option, 
-    do not include the x-ms-blob-sequence-number header.
-    '''
-
-    Max = 'max'
-    '''
-    Sets the sequence number to be the higher of the value included with the 
-    request and the value currently stored for the blob.
-    '''
-
-    Update = 'update'
-    '''Sets the sequence number to the value included with the request.'''
-
-
-class _LeaseActions(object):
-    '''Actions for a lease.'''
-
-    Acquire = 'acquire'
-    '''Acquire the lease.'''
-
-    Break = 'break'
-    '''Break the lease.'''
-
-    Change = 'change'
-    '''Change the lease ID.'''
-
-    Release = 'release'
-    '''Release the lease.'''
-
-    Renew = 'renew'
-    '''Renew the lease.'''
-
-
-class _BlobTypes(object):
-    '''Blob type options.'''
-
-    AppendBlob = 'AppendBlob'
-    '''Append blob type.'''
-
-    BlockBlob = 'BlockBlob'
-    '''Block blob type.'''
-
-    PageBlob = 'PageBlob'
-    '''Page blob type.'''
-
-
-class Include(object):
-    '''
-    Specifies the datasets to include in the blob list response.
-
-    :ivar ~azure.storage.blob.models.Include Include.COPY: 
-        Specifies that metadata related to any current or previous Copy Blob operation 
-        should be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.METADATA: 
-        Specifies that metadata be returned in the response.
-    :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: 
-        Specifies that snapshots should be included in the enumeration.
-    :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: 
-        Specifies that blobs for which blocks have been uploaded, but which have not 
-        been committed using Put Block List, be included in the response.
-    '''
-
-    def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
-                 copy=False, _str=None):
-        '''
-        :param bool snapshots:
-             Specifies that snapshots should be included in the enumeration.
-        :param bool metadata:
-            Specifies that metadata be returned in the response.
-        :param bool uncommitted_blobs:
-            Specifies that blobs for which blocks have been uploaded, but which have 
-            not been committed using Put Block List, be included in the response.
-        :param bool copy: 
-            Specifies that metadata related to any current or previous Copy Blob 
-            operation should be included in the response. 
-        :param str _str: 
-            A string representing the includes.
-        '''
-        if not _str:
-            _str = ''
-        components = _str.split(',')
-        self.snapshots = snapshots or ('snapshots' in components)
-        self.metadata = metadata or ('metadata' in components)
-        self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
-        self.copy = copy or ('copy' in components)
-
-    def __or__(self, other):
-        return Include(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Include(_str=str(self) + str(other))
-
-    def __str__(self):
-        include = (('snapshots,' if self.snapshots else '') +
-                   ('metadata,' if self.metadata else '') +
-                   ('uncommittedblobs,' if self.uncommitted_blobs else '') +
-                   ('copy,' if self.copy else ''))
-        return include.rstrip(',')
-
-
-Include.COPY = Include(copy=True)
-Include.METADATA = Include(metadata=True)
-Include.SNAPSHOTS = Include(snapshots=True)
-Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
-
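The removed Include values compose with | or + and serialize back to the
comma-separated include= query value. A minimal sketch of how the model was
consumed, assuming the v2017_04_17 blob package re-exports the track-1 names;
the account credentials and container name are placeholders:

    from azure.multiapi.storage.v2017_04_17.blob import Include, PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')

    # Combining flags concatenates their string forms: 'snapshots,metadata'.
    include = Include.SNAPSHOTS | Include.METADATA

    # list_blobs (on the BaseBlobService ancestor) forwards this as include=.
    for blob in service.list_blobs('mycontainer', include=include):
        print(blob.name)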
-
-class BlobPermissions(object):
-    '''
-    BlobPermissions class to be used with 
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
-
-    :ivar BlobPermissions BlobPermissions.ADD:
-        Add a block to an append blob.
-    :ivar BlobPermissions BlobPermissions.CREATE:
-        Write a new blob, snapshot a blob, or copy a blob to a new blob.
-    :ivar BlobPermissions BlobPermissions.DELETE:
-        Delete the blob.
-    :ivar BlobPermissions BlobPermissions.READ:
-        Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
-    :ivar BlobPermissions BlobPermissions.WRITE:
-        Create or write content, properties, metadata, or block list. Snapshot or lease 
-        the blob. Resize the blob (page blob only). Use the blob as the destination of a 
-        copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, add=False, create=False, write=False,
-                 delete=False, _str=None):
-        '''    
-        :param bool read:
-            Read the content, properties, metadata and block list. Use the blob as 
-            the source of a copy operation.
-        :param bool add:
-            Add a block to an append blob.
-        :param bool create:
-            Write a new blob, snapshot a blob, or copy a blob to a new blob.
-        :param bool write: 
-            Create or write content, properties, metadata, or block list. Snapshot 
-            or lease the blob. Resize the blob (page blob only). Use the blob as the 
-            destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the blob.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-BlobPermissions.ADD = BlobPermissions(add=True)
-BlobPermissions.CREATE = BlobPermissions(create=True)
-BlobPermissions.DELETE = BlobPermissions(delete=True)
-BlobPermissions.READ = BlobPermissions(read=True)
-BlobPermissions.WRITE = BlobPermissions(write=True)
-
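The BlobPermissions constants above compose the same way, concatenating the
'r', 'a', 'c', 'w', 'd' flags. A hedged sketch pairing them with
generate_blob_shared_access_signature, whose permission/expiry keywords follow
the track-1 API; account, container, and blob names are placeholders:

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2017_04_17.blob import (
        BlobPermissions,
        PageBlobService,
    )

    service = PageBlobService(account_name='myaccount', account_key='<key>')

    # READ | WRITE serializes to the permission string 'rw'.
    sas_token = service.generate_blob_shared_access_signature(
        'mycontainer', 'myblob',
        permission=BlobPermissions.READ | BlobPermissions.WRITE,
        expiry=datetime.utcnow() + timedelta(hours=1))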
-
-class ContainerPermissions(object):
-    '''
-    ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
-    API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. 
-
-    :ivar ContainerPermissions ContainerPermissions.DELETE:
-        Delete any blob in the container. Note: You cannot grant permissions to 
-        delete a container with a container SAS. Use an account SAS instead.
-    :ivar ContainerPermissions ContainerPermissions.LIST:
-        List blobs in the container.
-    :ivar ContainerPermissions ContainerPermissions.READ:
-        Read the content, properties, metadata or block list of any blob in the 
-        container. Use any blob in the container as the source of a copy operation.
-    :ivar ContainerPermissions ContainerPermissions.WRITE:
-        For any blob in the container, create or write content, properties, 
-        metadata, or block list. Snapshot or lease the blob. Resize the blob 
-        (page blob only). Use the blob as the destination of a copy operation 
-        within the same account. Note: You cannot grant permissions to read or 
-        write container properties or metadata, nor to lease a container, with 
-        a container SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata or block list of any blob in the 
-            container. Use any blob in the container as the source of a copy operation.
-        :param bool write: 
-            For any blob in the container, create or write content, properties, 
-            metadata, or block list. Snapshot or lease the blob. Resize the blob 
-            (page blob only). Use the blob as the destination of a copy operation 
-            within the same account. Note: You cannot grant permissions to read or 
-            write container properties or metadata, nor to lease a container, with 
-            a container SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any blob in the container. Note: You cannot grant permissions to 
-            delete a container with a container SAS. Use an account SAS instead.
-        :param bool list: 
-            List blobs in the container.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-ContainerPermissions.DELETE = ContainerPermissions(delete=True)
-ContainerPermissions.LIST = ContainerPermissions(list=True)
-ContainerPermissions.READ = ContainerPermissions(read=True)
-ContainerPermissions.WRITE = ContainerPermissions(write=True)
-
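A matching sketch for a container-level SAS, again with placeholder names and
the track-1 keyword arguments assumed. Note the caveat in the docstring above:
deleting the container itself requires an account SAS, not a container SAS.

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2017_04_17.blob import (
        ContainerPermissions,
        PageBlobService,
    )

    service = PageBlobService(account_name='myaccount', account_key='<key>')

    sas_token = service.generate_container_shared_access_signature(
        'mycontainer',
        permission=ContainerPermissions.READ | ContainerPermissions.LIST,  # 'rl'
        expiry=datetime.utcnow() + timedelta(hours=1))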
-
-class PremiumPageBlobTier(object):
-    '''
-    Specifies the page blob tier to set the blob to. This is only applicable to page
-    blobs on premium storage accounts.
-    Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
-    for detailed information on the corresponding IOPS and throughput per premium page blob tier.
-    '''
-
-    P4 = 'P4'
-    ''' P4 Tier '''
-
-    P6 = 'P6'
-    ''' P6 Tier '''
-
-    P10 = 'P10'
-    ''' P10 Tier '''
-
-    P20 = 'P20'
-    ''' P20 Tier '''
-
-    P30 = 'P30'
-    ''' P30 Tier '''
-
-    P40 = 'P40'
-    ''' P40 Tier '''
-
-    P50 = 'P50'
-    ''' P50 Tier '''
-
-    P60 = 'P60'
-    ''' P60 Tier '''
-
-
-class StandardBlobTier(object):
-    '''
-    Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts.
-    '''
-
-    Archive = 'Archive'
-    ''' Archive '''
-
-    Cool = 'Cool'
-    ''' Cool '''
-
-    Hot = 'Hot'
-    ''' Hot '''
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/pageblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/pageblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/pageblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/pageblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1395 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_page_ranges,
-    _parse_page_properties,
-    _parse_base_properties,
-)
-from ._encryption import _generate_blob_encryption_data
-from ._error import (
-    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import (
-    _PageBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
-_PAGE_ALIGNMENT = 512
-
-
-class PageBlobService(BaseBlobService):
-    '''
-    Page blobs are a collection of 512-byte pages optimized for random read and
-    write operations. To create a page blob, you initialize the page blob and
-    specify the maximum size the page blob will grow. To add or update the
-    contents of a page blob, you write a page or pages by specifying an offset
-    and a range that align to 512-byte page boundaries. A write to a page blob
-    can overwrite just one page, some pages, or up to 4 MB of the page blob.
-    Writes to page blobs happen in-place and are immediately committed to the
-    blob. The maximum size for a page blob is 1 TB.
-
-    :ivar int MAX_PAGE_SIZE: 
-        The size of the pages put by create_blob_from_* methods. Smaller pages 
-        may be put if there is less data provided. The maximum page size the service 
-        supports is 4MB.
-    '''
-
-    MAX_PAGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        self.blob_type = _BlobTypes.PageBlob
-        super(PageBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout)
-
-    def create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new Page Blob.
-
-        See create_blob_from_* for high level functions that handle the
-        creation and upload of large blobs with automatic chunking and
-        progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param int content_length:
-            Required. This header specifies the maximum size
-            for the page blob, up to 1 TB. The page blob size must be aligned
-            to a 512-byte boundary.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param int sequence_number:
-            The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the new Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._create_blob(
-            container_name,
-            blob_name,
-            content_length,
-            content_settings=content_settings,
-            sequence_number=sequence_number,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
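A minimal sketch of the call above: initialize an empty 1 MiB page blob whose
size is 512-byte aligned, as the docstring requires. Account, container, and
blob names are placeholders.

    from azure.multiapi.storage.v2017_04_17.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')

    props = service.create_blob('mycontainer', 'disk.vhd',
                                content_length=1024 * 1024)  # multiple of 512
    print(props.etag, props.last_modified)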
-    def incremental_copy_blob(self, container_name, blob_name, copy_source,
-                              metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None,
-                              destination_if_match=None, destination_if_none_match=None, destination_lease_id=None,
-                              source_lease_id=None, timeout=None):
-        '''
-        Begins an incremental copy of a blob asynchronously. This operation returns a copy operation
-        properties object, including a copy ID you can use to check or abort the
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for an incremental copy operation must be a page blob.
-        Call get_blob_properties on the destination blob to check the status of the copy operation.
-        The final blob will be committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure page blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            The copy source must be a snapshot and include a valid SAS token or be public.
-            Example:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the destination blob
-            has not been modified since the specified date/time. If the destination blob
-            has been modified, the Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               source_if_modified_since=None, source_if_unmodified_since=None,
-                               source_if_match=None, source_if_none_match=None,
-                               destination_if_modified_since=destination_if_modified_since,
-                               destination_if_unmodified_since=destination_if_unmodified_since,
-                               destination_if_match=destination_if_match,
-                               destination_if_none_match=destination_if_none_match,
-                               destination_lease_id=destination_lease_id,
-                               source_lease_id=source_lease_id, timeout=timeout,
-                               incremental_copy=True)
-
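A hedged sketch of the incremental copy flow, continuing with the service
client from the create_blob sketch above. The source URL is illustrative: it
must name a page blob snapshot and be readable (public or carrying a SAS), and
completion is observed by polling the destination blob's copy properties.

    copy_props = service.incremental_copy_blob(
        'backups', 'disk-incr',
        copy_source='https://myaccount.blob.core.windows.net/mycontainer/'
                    'disk.vhd?snapshot=2017-04-17T00:00:00.0000000Z&<sas>')

    # Poll the destination; the final blob is committed when the copy completes.
    dest = service.get_blob_properties('backups', 'disk-incr')
    print(dest.properties.copy.status)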
-    def update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Updates a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes page:
-            Content of the page.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._update_page(
-            container_name,
-            blob_name,
-            page,
-            start_range,
-            end_range,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_sequence_number_lte=if_sequence_number_lte,
-            if_sequence_number_lt=if_sequence_number_lt,
-            if_sequence_number_eq=if_sequence_number_eq,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
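A minimal sketch of an aligned write, reusing the service client from the
create_blob sketch: the range is inclusive, so a single 512-byte page at
offset 0 spans bytes 0-511.

    page = b'\x01' * 512
    props = service.update_page('mycontainer', 'disk.vhd', page,
                                start_range=0, end_range=511)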
-    def clear_page(
-            self, container_name, blob_name, start_range, end_range,
-            lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Clears a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'clear',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-
-        return self._perform_request(request, _parse_page_properties)
-
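Clearing follows the same inclusive, 512-byte-aligned range rules; a sketch
that zeroes the page written in the update_page example above:

    props = service.clear_page('mycontainer', 'disk.vhd',
                               start_range=0, end_range=511)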
-    def get_page_ranges(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve information
-            from.
-        :param int start_range:
-            Start of byte range to use for getting valid page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting valid page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the offset start up to
-            offset end.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of valid Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
-
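A sketch of enumerating the valid ranges, assuming the track-1 PageRange
model's start/end/is_cleared attributes:

    for page_range in service.get_page_ranges('mycontainer', 'disk.vhd'):
        print(page_range.start, page_range.end, page_range.is_cleared)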
-    def get_page_ranges_diff(
-            self, container_name, blob_name, previous_snapshot, snapshot=None,
-            start_range=None, end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the pages that differ between a previous snapshot and either a
-        more recent snapshot or the current blob, including pages that were
-        cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str previous_snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a previous blob snapshot to be compared
-            against a more recent snapshot or the current blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a more recent blob snapshot to be compared
-            against a previous snapshot (previous_snapshot).
-        :param int start_range:
-            Start of byte range to use for getting different page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting different page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the offset start up to
-            offset end.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of different Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('previous_snapshot', previous_snapshot)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'prevsnapshot': _to_str(previous_snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
-
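A sketch of the diff variant; previous_snapshot is the opaque snapshot
DateTime string returned when the snapshot was taken (placeholder value here):

    diff_ranges = service.get_page_ranges_diff(
        'mycontainer', 'disk.vhd',
        previous_snapshot='2017-04-17T00:00:00.0000000Z')
    for page_range in diff_ranges:
        print(page_range.start, page_range.end, page_range.is_cleared)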
-    def set_sequence_number(
-            self, container_name, blob_name, sequence_number_action, sequence_number=None,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Sets the blob sequence number.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information.
-        :param str sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('sequence_number_action', sequence_number_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-sequence-number-action': _to_str(sequence_number_action),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
-
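A sketch pairing this call with the removed SequenceNumberAction model:
'update' takes an explicit sequence_number, while 'increment' must omit it,
since the service adds 1 itself.

    from azure.multiapi.storage.v2017_04_17.blob import SequenceNumberAction

    service.set_sequence_number('mycontainer', 'disk.vhd',
                                SequenceNumberAction.Update, sequence_number=42)
    service.set_sequence_number('mycontainer', 'disk.vhd',
                                SequenceNumberAction.Increment)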
-    def resize_blob(
-            self, container_name, blob_name, content_length,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Resizes a page blob to the specified size. If the specified value is less
-        than the current size of the blob, then all pages above the specified value
-        are cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int content_length:
-            Size to resize blob to.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
-
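A sketch of resizing: growing simply extends the blob, while shrinking clears
every page above the new (512-byte-aligned) size.

    props = service.resize_blob('mycontainer', 'disk.vhd',
                                content_length=2 * 1024 * 1024)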
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None, max_connections=2,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                premium_page_blob_tier=premium_page_blob_tier)
-
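A sketch of the chunked upload with a progress callback; the local file path
is a placeholder, and total may be None when the size is unknown.

    def report(current, total):
        print('%d/%s bytes uploaded' % (current, total))

    service.create_blob_from_path(
        'mycontainer', 'disk.vhd', '/tmp/disk.vhd',
        max_connections=4, progress_callback=report)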
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file/stream, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a page
-            blob cannot be created if the count is unknown.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set the blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        if count < 0:
-            raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        if count % _PAGE_ALIGNMENT != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
-
-        cek, iv, encryption_data = None, None, None
-        if self.key_encryption_key is not None:
-            cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-        response = self._create_blob(
-            container_name=container_name,
-            blob_name=blob_name,
-            content_length=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            encryption_data=encryption_data
-        )
-
-        if count == 0:
-            return response
-
-        # _upload_blob_chunks returns the block ids for block blobs, so resource_properties
-        # is passed as a parameter to get the last_modified and etag for page and append blobs.
-        # This info is not needed for block blobs, since the subsequent _put_block_list call returns it.
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_PAGE_SIZE,
-            stream=stream,
-            max_connections=max_connections,
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_PageBlobChunkUploader,
-            if_match=response.etag,
-            timeout=timeout,
-            content_encryption_key=cek,
-            initialization_vector=iv,
-            resource_properties=resource_properties
-        )
-
-        return resource_properties
-
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the byte array.
-        :param int count:
-            Number of bytes to upload. Set to None or a negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            premium_page_blob_tier=premium_page_blob_tier)
-
-    def set_premium_page_blob_tier(
-            self, container_name, blob_name, premium_page_blob_tier,
-            timeout=None):
-        '''
-        Sets the page blob tier on the blob. This API is only supported for page blobs on premium accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('premium_page_blob_tier', premium_page_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(premium_page_blob_tier)
-        }
-
-        self._perform_request(request)
-
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None,
-                  premium_page_blob_tier=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation
-        properties object, including a copy ID you can use to check or abort the
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation must be a page blob. If the destination
-        blob already exists, it must be of the same blob type as the source blob.
-        Any existing destination blob will be overwritten.
-        The destination blob cannot be modified while a copy operation is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob's length, initially containing all zeroes. Then
-        the source page ranges are enumerated, and non-empty ranges are copied.
-
-        If the tier on the source blob is larger than the tier being passed to this
-        copy operation, or if the size of the blob exceeds the tier being passed to
-        this copy operation, then the operation will fail.
-
-        You can call get_blob_properties on the destination
-        blob to check the status of the copy operation. The final blob will be
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set on the destination blob. The tier correlates to
-            the size of the blob and number of allowed IOPS. This is only applicable to
-            page blobs on premium storage accounts.
-            If the tier on the source blob is larger than the tier being passed to this
-            copy operation, or if the size of the blob exceeds the tier being passed to
-            this copy operation, then the operation will fail.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata, premium_page_blob_tier,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False)
-
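For context on the polling pattern the copy_blob docstring describes, a minimal sketch under the removed track1 API; it assumes a PageBlobService instance named service, and the container/blob names and source URL are illustrative:

    import time

    # start the asynchronous server-side copy; returns CopyProperties with a status
    copy = service.copy_blob(
        'destcontainer', 'destblob',
        'https://myaccount.blob.core.windows.net/mycontainer/myblob')
    while copy.status == 'pending':
        time.sleep(5)  # poll until the Blob service finishes the best-effort copy
        copy = service.get_blob_properties(
            'destcontainer', 'destblob').properties.copy
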
-    # -----Helper methods-----------------------------------------------------
-
-    def _create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            encryption_data=None):
-        '''
-        See create_blob for more details. This helper method
-        allows for encryption and other special behaviors that are
-        handled safely by the library but prohibited in the public
-        version of this function.
-        :param str encryption_data:
-            The JSON formatted encryption metadata to upload as a part of the blob.
-            This should only be passed internally from other methods and only applied
-            when the upload of the entire blob contents immediately follows creation of the blob.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        See update_page for more details. This helper method
-        allows for encryption and other special behaviors that are
-        handled safely by the library but prohibited in the public
-        version of this function.
-        '''
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'update',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-        request.body = _get_data_bytes_only('page', page)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_page_properties)
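Before these removals, the track1 page-blob upload helpers above were used roughly as follows. A hedged sketch with illustrative account values; the v2017_04_17 import path is the one this diff deletes:

    from azure.multiapi.storage.v2017_04_17.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    data = b'\x00' * 1024  # page blob sizes must be 512-byte aligned
    props = service.create_blob_from_bytes('mycontainer', 'disk.vhd', data)
    print(props.etag, props.last_modified)  # ResourceProperties from the upload
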
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/blob/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/blob/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,188 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ._constants import X_MS_VERSION
-
-
-class BlobSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating blob and container access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_blob(self, container_name, blob_name, permission=None,
-                      expiry=None, start=None, id=None, ip=None, protocol=None,
-                      cache_control=None, content_disposition=None,
-                      content_encoding=None, content_language=None,
-                      content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the shared access
-            signature. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = container_name + '/' + blob_name
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('b')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
-
-        return sas.get_token()
-
-    def generate_container(self, container_name, permission=None, expiry=None,
-                           start=None, id=None, ip=None, protocol=None,
-                           cache_control=None, content_disposition=None,
-                           content_encoding=None, content_language=None,
-                           content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the shared access
-            signature. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('c')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
-
-        return sas.get_token()
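A hedged usage sketch for the BlobSharedAccessSignature factory removed above; account values are illustrative, and the BlobPermissions import assumes the models module the same removed package exposed:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_04_17.blob.models import BlobPermissions
    from azure.multiapi.storage.v2017_04_17.blob.sharedaccesssignature import (
        BlobSharedAccessSignature,
    )

    sas = BlobSharedAccessSignature('myaccount', '<key>')
    token = sas.generate_blob(
        'mycontainer', 'myblob',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    # append token as the query string of the blob URL to grant read access
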
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,44 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    DEFAULT_X_MS_VERSION,
-)
-from .cloudstorageaccount import CloudStorageAccount
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-    ServiceStats,
-    GeoReplication,
-    LocationMode,
-    RetryContext,
-)
-from .retry import (
-    ExponentialRetry,
-    LinearRetry,
-    no_retry,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
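The common package's public surface, also removed here, centered on CloudStorageAccount as a service factory. A minimal sketch with illustrative values, assuming the factory methods the track1 SDK exposed:

    from azure.multiapi.storage.v2017_04_17.common import CloudStorageAccount

    account = CloudStorageAccount(account_name='myaccount', account_key='<key>')
    blob_service = account.create_block_blob_service()  # track1 factory method
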
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_auth.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_auth.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_auth.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_auth.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,109 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ._common_conversion import (
-    _sign_string,
-)
-
-import logging
-logger = logging.getLogger(__name__)
-
-
-class _StorageSharedKeyAuthenticationBase(object):
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = request.path.split('?')[0]
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        signature = _sign_string(self.account_key, string_to_sign)
-        auth_string = 'SharedKey ' + self.account_name + ':' + signature
-        request.headers['Authorization'] = auth_string
-
-
-class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthenticationBase):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        logger.debug("String_to_sign=%s", string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value:
-                string_to_sign += '\n' + name.lower() + ':' + value
-
-        return string_to_sign
-
-
-class _StorageNoAuthentication(object):
-    def sign_request(self, request):
-        pass
-
-
-class _StorageSASAuthentication(object):
-    def __init__(self, sas_token):
-        self.sas_token = sas_token
-
-    def sign_request(self, request):
-        # if 'sig=' is present, then the request has already been signed
-        # as is the case when performing retries
-        if 'sig=' in request.path:
-            return
-        if '?' in request.path:
-            request.path += '&'
-        else:
-            request.path += '?'
-
-        request.path += self.sas_token
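The Shared Key scheme deleted above reduces to HMAC-SHA256 over a canonical string-to-sign. A standalone sketch mirroring _sign_string and _add_authorization_header; the key and string-to-sign are illustrative inputs:

    import base64
    import hashlib
    import hmac

    def sign(account_name, account_key_b64, string_to_sign):
        key = base64.b64decode(account_key_b64)  # storage keys are base64-encoded
        digest = hmac.new(key, string_to_sign.encode('utf-8'),
                          hashlib.sha256).digest()
        signature = base64.b64encode(digest).decode('utf-8')
        # value for the Authorization header
        return 'SharedKey ' + account_name + ':' + signature
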
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_common_conversion.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_common_conversion.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_common_conversion.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_common_conversion.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,135 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-import sys
-from io import (SEEK_SET)
-
-from dateutil.tz import tzutc
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
-)
-from .models import (
-    _unicode_type,
-)
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-def _int_to_str(value):
-    return str(int(value)) if value is not None else None
-
-
-def _bool_to_str(value):
-    if value is None:
-        return None
-
-    if isinstance(value, bool):
-        if value:
-            return 'true'
-        else:
-            return 'false'
-
-    return str(value)
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _datetime_to_utc_string(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value is None:
-        return None
-
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-
-    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-
-def _encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def _decode_base64_to_bytes(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def _decode_base64_to_text(data):
-    decoded_bytes = _decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def _sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = _decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, _unicode_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, _unicode_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = _encode_base64(digest)
-    return encoded_digest
-
-
-def _get_content_md5(data):
-    md5 = hashlib.md5()
-    if isinstance(data, bytes):
-        md5.update(data)
-    elif hasattr(data, 'read'):
-        pos = 0
-        try:
-            pos = data.tell()
-        except (AttributeError, IOError):
-            pass
-        for chunk in iter(lambda: data.read(4096), b""):
-            md5.update(chunk)
-        try:
-            data.seek(pos, SEEK_SET)
-        except (AttributeError, IOError):
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
-    else:
-        raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
-
-    return base64.b64encode(md5.digest()).decode('utf-8')
-
-
-def _lower(text):
-    return text.lower()
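As a standalone check of the removed date helper's documented behavior (tz-aware values are normalized to UTC, naive values are assumed UTC), with an illustrative timestamp:

    from datetime import datetime
    from dateutil.tz import gettz, tzutc

    value = datetime(2017, 4, 17, 12, 0, tzinfo=gettz('America/New_York'))
    if value.tzinfo:
        value = value.astimezone(tzutc())  # normalize to UTC, as the helper did
    print(value.strftime('%a, %d %b %Y %H:%M:%S GMT'))
    # Mon, 17 Apr 2017 16:00:00 GMT
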
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_connection.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_connection.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_connection.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_connection.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,163 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from urllib.parse import urlparse
-else:
-    from urlparse import urlparse
-
-from ._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_KEY,
-    DEV_BLOB_HOST,
-    DEV_QUEUE_HOST,
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-_EMULATOR_ENDPOINTS = {
-    'blob': DEV_BLOB_HOST,
-    'queue': DEV_QUEUE_HOST,
-    'file': '',
-}
-
-_CONNECTION_ENDPOINTS = {
-    'blob': 'BlobEndpoint',
-    'queue': 'QueueEndpoint',
-    'file': 'FileEndpoint',
-}
-
-_CONNECTION_ENDPOINTS_SECONDARY = {
-    'blob': 'BlobSecondaryEndpoint',
-    'queue': 'QueueSecondaryEndpoint',
-    'file': 'FileSecondaryEndpoint',
-}
-
-class _ServiceParameters(object):
-    def __init__(self, service, account_name=None, account_key=None, sas_token=None, 
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 custom_domain=None, custom_domain_secondary=None):
-
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.protocol = protocol or DEFAULT_PROTOCOL
-        self.is_emulated = is_emulated
-
-        if is_emulated:
-            self.account_name = DEV_ACCOUNT_NAME
-            self.protocol = 'http'
-
-            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
-            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
-
-            self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-            self.secondary_endpoint = '{}/{}-secondary'.format(_EMULATOR_ENDPOINTS[service], self.account_name)
-        else:
-            # Strip whitespace from the key
-            if self.account_key:
-                self.account_key = self.account_key.strip()
-
-            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
-
-            # Setup the primary endpoint
-            if custom_domain:
-                parsed_url = urlparse(custom_domain)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.primary_endpoint = parsed_url.netloc + path
-                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
-            else:
-                if not self.account_name:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
-
-            # Setup the secondary endpoint
-            if custom_domain_secondary:
-                if not custom_domain:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)   
-
-                parsed_url = urlparse(custom_domain_secondary)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.secondary_endpoint = parsed_url.netloc + path
-            else:
-                if self.account_name:
-                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
-                else:
-                    self.secondary_endpoint = None
-
-    @staticmethod
-    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, is_emulated=None,
-                               protocol=None, endpoint_suffix=None, custom_domain=None, request_session=None,
-                               connection_string=None, socket_timeout=None):
-        if connection_string:
-            params = _ServiceParameters._from_connection_string(connection_string, service)
-        elif is_emulated:
-            params = _ServiceParameters(service, is_emulated=True)
-        elif account_name:
-            params = _ServiceParameters(service,
-                                        account_name=account_name,
-                                        account_key=account_key,
-                                        sas_token=sas_token,
-                                        is_emulated=is_emulated,
-                                        protocol=protocol,
-                                        endpoint_suffix=endpoint_suffix,
-                                        custom_domain=custom_domain)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        params.request_session = request_session
-        params.socket_timeout = socket_timeout
-        return params
-
-    @staticmethod
-    def _from_connection_string(connection_string, service):
-        # Split into key=value pairs removing empties, then split the pairs into a dict
-        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
-
-        # Authentication
-        account_name = config.get('AccountName')
-        account_key = config.get('AccountKey')
-        sas_token = config.get('SharedAccessSignature')
-
-        # Emulator
-        is_emulated = config.get('UseDevelopmentStorage')
-
-        # Basic URL Configuration
-        protocol = config.get('DefaultEndpointsProtocol')
-        endpoint_suffix = config.get('EndpointSuffix')
-
-        # Custom URLs
-        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
-        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
-
-        return _ServiceParameters(service,
-                                  account_name=account_name,
-                                  account_key=account_key,
-                                  sas_token=sas_token,
-                                  is_emulated=is_emulated,
-                                  protocol=protocol,
-                                  endpoint_suffix=endpoint_suffix,
-                                  custom_domain=endpoint,
-                                  custom_domain_secondary=endpoint_secondary)
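The connection-string handling removed above boils down to a key=value split plus endpoint assembly. A self-contained sketch using an illustrative connection string:

    conn_str = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
                'AccountKey=<key>;EndpointSuffix=core.windows.net')
    # split into key=value pairs, dropping empties, exactly as _from_connection_string did
    config = dict(s.split('=', 1) for s in conn_str.split(';') if s)
    primary = '{}.{}.{}'.format(config['AccountName'], 'blob',
                                config['EndpointSuffix'])
    print(primary)  # myaccount.blob.core.windows.net
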
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,47 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import platform
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.37.1'
-
-# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
-# The first version (0.37.0) is the common package version, and the second (0.38.0) is the service package version
-USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
-USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
-                                                          platform.python_version(), platform.system(),
-                                                          platform.release())
-
-# default values for common package, in case it is used directly
-DEFAULT_X_MS_VERSION = '2017-04-17'
-DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)
-
-# Live ServiceClient URLs
-SERVICE_HOST_BASE = 'core.windows.net'
-DEFAULT_PROTOCOL = 'https'
-
-# Development ServiceClient URLs
-DEV_BLOB_HOST = '127.0.0.1:10000'
-DEV_QUEUE_HOST = '127.0.0.1:10001'
-
-# Default credentials for Development Storage Service
-DEV_ACCOUNT_NAME = 'devstoreaccount1'
-DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
-
-# Socket timeout in seconds
-DEFAULT_SOCKET_TIMEOUT = 20
-
-# Encryption constants
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
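
The two USER_AGENT_STRING_* constants above only become a full UserAgent once a service package appends its own version. An illustrative composition, assuming a hypothetical service package version of 0.36.0:

import platform

common_version = '0.37.1'
service_version = '0.36.0'  # hypothetical; supplied by the service package
prefix = 'Azure-Storage/{}-'.format(common_version)
suffix = '(Python {} {}; {} {})'.format(platform.python_implementation(),
                                        platform.python_version(),
                                        platform.system(), platform.release())
user_agent = '{}{} {}'.format(prefix, service_version, suffix)
# e.g. 'Azure-Storage/0.37.1-0.36.0 (Python CPython 3.4.2; Windows 8)'
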
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,354 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-from ._common_conversion import _to_str
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    ServiceProperties,
-    Logging,
-    Metrics,
-    CorsRule,
-    AccessPolicy,
-    _dict,
-    GeoReplication,
-    ServiceStats,
-)
-
-
-def _int_to_str(value):
-    return value if value is None else int(value)
-
-
-def _bool(value):
-    return value.lower() == 'true'
-
-
-def _to_upper_str(value):
-    return _to_str(value).upper() if value is not None else None
-
-
-def _get_download_size(start_range, end_range, resource_size):
-    if start_range is not None:
-        end_range = end_range if end_range else (resource_size if resource_size else None)
-        if end_range is not None:
-            return end_range - start_range
-        else:
-            return None
-    else:
-        return resource_size
-
-
-GET_PROPERTIES_ATTRIBUTE_MAP = {
-    'last-modified': (None, 'last_modified', parser.parse),
-    'etag': (None, 'etag', _to_str),
-    'x-ms-blob-type': (None, 'blob_type', _to_str),
-    'content-length': (None, 'content_length', _int_to_str),
-    'content-range': (None, 'content_range', _to_str),
-    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _int_to_str),
-    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _int_to_str),
-    'x-ms-access-tier': (None, 'blob_tier', _to_str),
-    'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
-    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
-    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
-    'x-ms-share-quota': (None, 'quota', _int_to_str),
-    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
-    'content-type': ('content_settings', 'content_type', _to_str),
-    'cache-control': ('content_settings', 'cache_control', _to_str),
-    'content-encoding': ('content_settings', 'content_encoding', _to_str),
-    'content-disposition': ('content_settings', 'content_disposition', _to_str),
-    'content-language': ('content_settings', 'content_language', _to_str),
-    'content-md5': ('content_settings', 'content_md5', _to_str),
-    'x-ms-lease-status': ('lease', 'status', _to_str),
-    'x-ms-lease-state': ('lease', 'state', _to_str),
-    'x-ms-lease-duration': ('lease', 'duration', _to_str),
-    'x-ms-copy-id': ('copy', 'id', _to_str),
-    'x-ms-copy-source': ('copy', 'source', _to_str),
-    'x-ms-copy-status': ('copy', 'status', _to_str),
-    'x-ms-copy-progress': ('copy', 'progress', _to_str),
-    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
-    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
-    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
-}
-
-
-def _parse_metadata(response):
-    '''
-    Extracts resource metadata from the x-ms-meta-* response headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    metadata = _dict()
-    for key, value in response.headers.items():
-        if key.startswith('x-ms-meta-'):
-            metadata[key[10:]] = _to_str(value)
-
-    return metadata
-
-
-def _parse_properties(response, result_class):
-    '''
-    Extracts resource properties and metadata from the response headers,
-    ignoring the standard HTTP headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    props = result_class()
-    for key, value in response.headers.items():
-        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
-        if info:
-            if info[0] is None:
-                setattr(props, info[1], info[2](value))
-            else:
-                attr = getattr(props, info[0])
-                setattr(attr, info[1], info[2](value))
-
-    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
-        props.blob_tier = _to_upper_str(props.blob_tier)
-    return props
-
-
-def _parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def _convert_xml_to_signed_identifiers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <SignedIdentifiers>
-      <SignedIdentifier>
-        <Id>unique-value</Id>
-        <AccessPolicy>
-          <Start>start-time</Start>
-          <Expiry>expiry-time</Expiry>
-          <Permission>abbreviated-permission-list</Permission>
-        </AccessPolicy>
-      </SignedIdentifier>
-    </SignedIdentifiers>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    list_element = ETree.fromstring(response.body)
-    signed_identifiers = _dict()
-
-    for signed_identifier_element in list_element.findall('SignedIdentifier'):
-        # Id element
-        id = signed_identifier_element.find('Id').text
-
-        # Access policy element
-        access_policy = AccessPolicy()
-        access_policy_element = signed_identifier_element.find('AccessPolicy')
-        if access_policy_element is not None:
-            start_element = access_policy_element.find('Start')
-            if start_element is not None:
-                access_policy.start = parser.parse(start_element.text)
-
-            expiry_element = access_policy_element.find('Expiry')
-            if expiry_element is not None:
-                access_policy.expiry = parser.parse(expiry_element.text)
-
-            access_policy.permission = access_policy_element.findtext('Permission')
-
-        signed_identifiers[id] = access_policy
-
-    return signed_identifiers
-
-
-def _convert_xml_to_service_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceStats>
-      <GeoReplication>      
-          <Status>live|bootstrap|unavailable</Status>
-          <LastSyncTime>sync-time|<empty></LastSyncTime>
-      </GeoReplication>
-    </StorageServiceStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_stats_element = ETree.fromstring(response.body)
-
-    geo_replication_element = service_stats_element.find('GeoReplication')
-
-    geo_replication = GeoReplication()
-    geo_replication.status = geo_replication_element.find('Status').text
-    last_sync_time = geo_replication_element.find('LastSyncTime').text
-    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None
-
-    service_stats = ServiceStats()
-    service_stats.geo_replication = geo_replication
-    return service_stats
-
-
-def _convert_xml_to_service_properties(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_properties_element = ETree.fromstring(response.body)
-    service_properties = ServiceProperties()
-
-    # Logging
-    logging = service_properties_element.find('Logging')
-    if logging is not None:
-        service_properties.logging = Logging()
-        service_properties.logging.version = logging.find('Version').text
-        service_properties.logging.delete = _bool(logging.find('Delete').text)
-        service_properties.logging.read = _bool(logging.find('Read').text)
-        service_properties.logging.write = _bool(logging.find('Write').text)
-
-        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
-                                         service_properties.logging.retention_policy)
-    # HourMetrics
-    hour_metrics_element = service_properties_element.find('HourMetrics')
-    if hour_metrics_element is not None:
-        service_properties.hour_metrics = Metrics()
-        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
-
-    # MinuteMetrics
-    minute_metrics_element = service_properties_element.find('MinuteMetrics')
-    if minute_metrics_element is not None:
-        service_properties.minute_metrics = Metrics()
-        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
-
-    # CORS
-    cors = service_properties_element.find('Cors')
-    if cors is not None:
-        service_properties.cors = list()
-        for rule in cors.findall('CorsRule'):
-            allowed_origins = rule.find('AllowedOrigins').text.split(',')
-
-            allowed_methods = rule.find('AllowedMethods').text.split(',')
-
-            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
-
-            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
-
-            exposed_headers = rule.find('ExposedHeaders').text
-            if exposed_headers is not None:
-                cors_rule.exposed_headers = exposed_headers.split(',')
-
-            allowed_headers = rule.find('AllowedHeaders').text
-            if allowed_headers is not None:
-                cors_rule.allowed_headers = allowed_headers.split(',')
-
-            service_properties.cors.append(cors_rule)
-
-    # Target version
-    target_version = service_properties_element.find('DefaultServiceVersion')
-    if target_version is not None:
-        service_properties.target_version = target_version.text
-
-    return service_properties
-
-
-def _convert_xml_to_metrics(xml, metrics):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    metrics.version = xml.find('Version').text
-
-    # Enabled
-    metrics.enabled = _bool(xml.find('Enabled').text)
-
-    # IncludeAPIs
-    include_apis_element = xml.find('IncludeAPIs')
-    if include_apis_element is not None:
-        metrics.include_apis = _bool(include_apis_element.text)
-
-    # RetentionPolicy
-    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
-
-
-def _convert_xml_to_retention_policy(xml, retention_policy):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    retention_policy.enabled = _bool(xml.find('Enabled').text)
-
-    # Days
-    days_element = xml.find('Days')
-    if days_element is not None:
-        retention_policy.days = int(days_element.text)
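
The heart of this deserializer is the table-driven dispatch in _parse_properties: each header maps to a (sub-object, attribute, converter) triple. A self-contained sketch of the same pattern, using stand-in classes rather than the real model types:

ATTRIBUTE_MAP = {
    'etag': (None, 'etag', str),
    'content-length': (None, 'content_length', int),
    'content-type': ('content_settings', 'content_type', str),
}

class ContentSettings(object):
    pass

class Props(object):
    def __init__(self):
        self.content_settings = ContentSettings()

headers = {'etag': '"0x8D"', 'content-length': '42', 'content-type': 'text/plain'}
props = Props()
for key, value in headers.items():
    info = ATTRIBUTE_MAP.get(key)
    if info:
        # info[0] selects the target object; info[2] converts the raw string
        target = props if info[0] is None else getattr(props, info[0])
        setattr(target, info[1], info[2](value))

assert props.content_length == 42
assert props.content_settings.content_type == 'text/plain'
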
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,242 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-
-from ._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-    __version__,
-)
-from ._error import (
-    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
-    _validate_not_none,
-    _validate_encryption_protocol_version,
-    _validate_key_encryption_key_unwrap,
-    _validate_kek_id,
-)
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier, 
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-    
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    except KeyError:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_unwrap(key_encryption_key)
-    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())
-
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
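
The envelope scheme above only requires the key-encryption-key to expose three methods: wrap_key, get_kid, and get_key_wrap_algorithm. A toy sketch of the metadata dict that _generate_encryption_data_dict assembles; the XOR "wrap" is a placeholder for illustration only, never a real wrapping algorithm:

import base64
from collections import OrderedDict

class ToyKek(object):
    def __init__(self, kid, secret):
        self._kid, self._secret = kid, secret
    def get_kid(self):
        return self._kid
    def get_key_wrap_algorithm(self):
        return 'XOR-DEMO'  # placeholder algorithm name
    def wrap_key(self, cek):
        return bytes(b ^ self._secret for b in cek)

kek = ToyKek('key-1', 0x5A)
cek = b'\x00' * 32  # a real 256-bit content-encryption key would be random
iv = b'\x00' * 16   # a real initialization vector would be random too

wrapped = OrderedDict([('KeyId', kek.get_kid()),
                       ('EncryptedKey', base64.b64encode(kek.wrap_key(cek)).decode()),
                       ('Algorithm', kek.get_key_wrap_algorithm())])
envelope = OrderedDict([('WrappedContentKey', wrapped),
                        ('EncryptionAgent', OrderedDict([('Protocol', '1.0'),
                                                         ('EncryptionAlgorithm', 'AES_CBC_256')])),
                        ('ContentEncryptionIV', base64.b64encode(iv).decode())])
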
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_error.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,186 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from sys import version_info
-
-if version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-from azure.common import (
-    AzureHttpError,
-    AzureConflictHttpError,
-    AzureMissingResourceHttpError,
-    AzureException,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-)
-
-_ERROR_CONFLICT = 'Conflict ({0})'
-_ERROR_NOT_FOUND = 'Not found ({0})'
-_ERROR_UNKNOWN = 'Unknown error ({0})'
-_ERROR_STORAGE_MISSING_INFO = \
-    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
-_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
-    'The emulator does not support the file service.'
-_ERROR_ACCESS_POLICY = \
-    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
-    'instance'
-_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
-_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
-_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
-_ERROR_VALUE_NONE = '{0} should not be None.'
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
-_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'The {0} chunk downloader requires more than one thread; ' + \
-    'use get_{0}_to_bytes for single-threaded ' + \
-    '{0} downloads.'
-_ERROR_START_END_NEEDED_FOR_MD5 = \
-    'Both end_range and start_range need to be specified ' + \
-    'for getting content MD5.'
-_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
-    'Getting content MD5 for a range greater than 4MB ' + \
-    'is not supported.'
-_ERROR_MD5_MISMATCH = \
-    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
-_ERROR_TOO_MANY_ACCESS_POLICIES = \
-    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
-    'Encryption version is not supported.'
-_ERROR_DECRYPTION_FAILURE = \
-    'Decryption failed'
-_ERROR_ENCRYPTION_REQUIRED = \
-    'Encryption required but no key was provided.'
-_ERROR_DECRYPTION_REQUIRED = \
-    'Decryption required but neither key nor resolver was provided.' + \
-    ' If you do not want to decrypt, please do not set the require encryption flag.'
-_ERROR_INVALID_KID = \
-    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
-_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
-    'Specified encryption algorithm is not supported.'
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
-                                           ' for this method.'
-_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
-_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
-                            'Data was either not encrypted or metadata has been lost.'
-
-
-def _dont_fail_on_exist(error):
-    '''Don't throw an exception if the resource already exists.
-    This is called by create_* APIs with fail_on_exist=False.'''
-    if isinstance(error, AzureConflictHttpError):
-        return False
-    else:
-        raise error
-
-
-def _dont_fail_not_exist(error):
-    '''Don't throw an exception if the resource doesn't exist.
-    This is called by delete_* APIs with fail_not_exist=False.'''
-    if isinstance(error, AzureMissingResourceHttpError):
-        return False
-    else:
-        raise error
-
-
-def _http_error_handler(http_error):
-    ''' Simple error handler for azure.'''
-    message = str(http_error)
-    if http_error.respbody is not None:
-        message += '\n' + http_error.respbody.decode('utf-8-sig')
-    raise AzureHttpError(message, http_error.status)
-
-
-def _validate_type_bytes(param_name, param):
-    if not isinstance(param, bytes):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _validate_type_bytes_or_stream(param_name, param):
-    if not (isinstance(param, bytes) or hasattr(param, 'read')):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
-
-
-def _validate_content_match(server_md5, computed_md5):
-    if server_md5 != computed_md5:
-        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
-
-
-def _validate_access_policies(identifiers):
-    if identifiers and len(identifiers) > 5:
-        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-def _validate_key_encryption_key_unwrap(kek):
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-
-
-def _validate_encryption_required(require_encryption, kek):
-    if require_encryption and (kek is None):
-        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
-
-
-def _validate_decryption_required(require_encryption, kek, resolver):
-    if (require_encryption and (kek is None) and
-            (resolver is None)):
-        raise ValueError(_ERROR_DECRYPTION_REQUIRED)
-
-
-def _validate_encryption_protocol_version(encryption_protocol):
-    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-
-
-def _validate_kek_id(kid, resolved_id):
-    if not (kid == resolved_id):
-        raise ValueError(_ERROR_INVALID_KID)
-
-
-def _validate_encryption_unsupported(require_encryption, key_encryption_key):
-    if require_encryption or (key_encryption_key is not None):
-        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
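
The _validate_key_encryption_key_* helpers above check interfaces by duck typing (hasattr plus callable) instead of isinstance. A generic standalone version of the same check, with hypothetical names:

def validate_interface(obj, method_names):
    for name in method_names:
        attr = getattr(obj, name, None)
        if not callable(attr):
            raise AttributeError(
                'object does not define a complete interface: '
                '{} is missing or not callable'.format(name))

class GoodKek(object):
    def get_kid(self):
        return 'key-1'
    def unwrap_key(self, wrapped_key, algorithm):
        return b'\x00' * 32

validate_interface(GoodKek(), ['get_kid', 'unwrap_key'])  # passes silently
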
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_http/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_http/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_http/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_http/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,83 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-
-class HTTPError(Exception):
-    '''
-    Represents an HTTP exception raised when the response status code is >= 300.
-
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar list respheader:
-        the returned headers, as a list of (name, value) pairs
-    :ivar bytes respbody:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, respheader, respbody):
-        self.status = status
-        self.respheader = respheader
-        self.respbody = respbody
-        Exception.__init__(self, message)
-
-
-class HTTPResponse(object):
-    '''
-    Represents a response from an HTTP request.
-    
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar dict headers:
-        the returned headers
-    :ivar bytes body:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, headers, body):
-        self.status = status
-        self.message = message
-        self.headers = headers
-        self.body = body
-
-
-class HTTPRequest(object):
-    '''
-    Represents an HTTP Request.
-
-    :ivar str host:
-        the host name to connect to
-    :ivar str method:
-        the method to use to connect (string such as GET, POST, PUT, etc.)
-    :ivar str path:
-        the uri fragment
-    :ivar dict query:
-        query parameters
-    :ivar dict headers:
-        header values
-    :ivar bytes body:
-        the body of the request.
-    '''
-
-    def __init__(self):
-        self.host = ''
-        self.method = ''
-        self.path = ''
-        self.query = {}  # dict of query parameters (name -> value)
-        self.headers = {}  # dict of headers (name -> value)
-        self.body = ''
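
These three containers carry no behavior of their own; the convention enforced by callers is that any response with status >= 300 is surfaced as an HTTPError. A sketch of that convention, with the class redefined locally so the snippet runs standalone:

class HTTPError(Exception):
    def __init__(self, status, message, respheader, respbody):
        self.status = status
        self.respheader = respheader
        self.respbody = respbody
        Exception.__init__(self, message)

def check_response(status, reason, headers, body):
    # hypothetical caller-side check; not a function from the package
    if status >= 300:
        raise HTTPError(status, reason, list(headers.items()), body)

try:
    check_response(404, 'Not Found', {'x-ms-request-id': 'abc'}, b'')
except HTTPError as err:
    assert err.status == 404
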
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_http/httpclient.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_http/httpclient.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_http/httpclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_http/httpclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,112 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import logging
-from . import HTTPResponse
-from .._serialization import _get_data_bytes_or_stream_only
-logger = logging.getLogger(__name__)
-
-
-class _HTTPClient(object):
-    '''
-    Takes the request and sends it to cloud service and returns the response.
-    '''
-
-    def __init__(self, protocol=None, session=None, timeout=None):
-        '''
-        :param str protocol:
-            http or https.
-        :param requests.Session session:
-            session object created with requests library (or compatible).
-        :param int timeout:
-            timeout for the http request, in seconds.
-        '''
-        self.protocol = protocol
-        self.session = session
-        self.timeout = timeout
-
-        # By default, requests adds an Accept:*/* and Accept-Encoding to the session, 
-        # which causes issues with some Azure REST APIs. Removing these here gives us 
-        # the flexibility to add them back on a case-by-case basis.
-        if 'Accept' in self.session.headers:
-            del self.session.headers['Accept']
-
-        if 'Accept-Encoding' in self.session.headers:
-            del self.session.headers['Accept-Encoding']
-
-        self.proxies = None
-
-    def set_proxy(self, host, port, user, password):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        Note that we set the proxies directly on the request later on rather than
-        using the session object as requests has a bug where session proxy is ignored
-        in favor of environment proxy. So, auth will not work unless it is passed
-        directly when making the request as this overrides both.
-
-        :param str host:
-            Address of the proxy. Ex: '192.168.0.100'
-        :param int port:
-            Port of the proxy. Ex: 6000
-        :param str user:
-            User for proxy authorization.
-        :param str password:
-            Password for proxy authorization.
-        '''
-        if user and password:
-            proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
-        else:
-            proxy_string = '{}:{}'.format(host, port)
-
-        self.proxies = {'http': 'http://{}'.format(proxy_string),
-                        'https': 'https://{}'.format(proxy_string)}
-
-    def perform_request(self, request):
-        '''
-        Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If 
-        the response code indicates an error, raise an HTTPError.    
-        
-        :param HTTPRequest request:
-            The request to serialize and send.
-        :return: An HTTPResponse containing the parsed HTTP response.
-        :rtype: :class:`~azure.storage.common._http.HTTPResponse`
-        '''
-        # Verify the body is either bytes or a file-like/stream object
-        if request.body:
-            request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-
-        # Construct the URI
-        uri = self.protocol.lower() + '://' + request.host + request.path
-
-        # Send the request
-        response = self.session.request(request.method,
-                                        uri,
-                                        params=request.query,
-                                        headers=request.headers,
-                                        data=request.body or None,
-                                        timeout=self.timeout,
-                                        proxies=self.proxies)
-
-        # Parse the response
-        status = int(response.status_code)
-        response_headers = {}
-        for key, name in response.headers.items():
-            response_headers[key.lower()] = name
-
-        wrap = HTTPResponse(status, response.reason, response_headers, response.content)
-        response.close()
-
-        return wrap
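
set_proxy's URL formation is easy to get wrong by hand, so here it is isolated as a pure function (hypothetical name, placeholder values):

def build_proxies(host, port, user=None, password=None):
    if user and password:
        proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
    else:
        proxy_string = '{}:{}'.format(host, port)
    return {'http': 'http://{}'.format(proxy_string),
            'https': 'https://{}'.format(proxy_string)}

assert build_proxies('192.168.0.100', 6000) == {
    'http': 'http://192.168.0.100:6000',
    'https': 'https://192.168.0.100:6000',
}
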
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,348 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-import uuid
-from datetime import date
-from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
-from os import fstat
-from time import time
-from wsgiref.handlers import format_date_time
-
-from dateutil.tz import tzutc
-
-if sys.version_info >= (3,):
-    from urllib.parse import quote as url_quote
-else:
-    from urllib2 import quote as url_quote
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES,
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-)
-from .models import (
-    _unicode_type,
-)
-from ._common_conversion import (
-    _str,
-)
-
-
-def _to_utc_datetime(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _update_request(request, x_ms_version, user_agent_string):
-    # Verify body
-    if request.body:
-        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-        length = _len_plus(request.body)
-
-        # The only scenario where this is plausible is if the stream object is not seekable.
-        if length is None:
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)
-
-        # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
-        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
-            request.headers['Content-Length'] = str(length)
-
-    # Append additional headers based on the service
-    request.headers['x-ms-version'] = x_ms_version
-    request.headers['User-Agent'] = user_agent_string
-    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())
-
-    # If the host has a path component (ex local storage), move it
-    path = request.host.split('/', 1)
-    if len(path) == 2:
-        request.host = path[0]
-        request.path = '/{}{}'.format(path[1], request.path)
-
-    # Encode and optionally add local storage prefix to path
-    request.path = url_quote(request.path, '/()$=\',~')
-
-
-def _add_metadata_headers(metadata, request):
-    if metadata:
-        if not request.headers:
-            request.headers = {}
-        for name, value in metadata.items():
-            request.headers['x-ms-meta-' + name] = value
-
-
-def _add_date_header(request):
-    current_time = format_date_time(time())
-    request.headers['x-ms-date'] = current_time
-
-
-def _get_data_bytes_only(param_name, param_value):
-    '''Validates the request body passed in and converts it to bytes
-    if our policy allows it.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _get_data_bytes_or_stream_only(param_name, param_value):
-    '''Validates the request body passed in is a stream/file-like or bytes
-    object.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _get_request_body(request_body):
-    '''Converts an object into a request body. None becomes an empty
-    byte string, bytes and stream objects are passed through unchanged,
-    and anything else is stringified and encoded as UTF-8.'''
-    if request_body is None:
-        return b''
-
-    if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
-        return request_body
-
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    request_body = str(request_body)
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    return request_body
-
-
-def _convert_signed_identifiers_to_xml(signed_identifiers):
-    if signed_identifiers is None:
-        return ''
-
-    sis = ETree.Element('SignedIdentifiers')
-    for id, access_policy in signed_identifiers.items():
-        # Root signed identifiers element
-        si = ETree.SubElement(sis, 'SignedIdentifier')
-
-        # Id element
-        ETree.SubElement(si, 'Id').text = id
-
-        # Access policy element
-        policy = ETree.SubElement(si, 'AccessPolicy')
-
-        if access_policy.start:
-            start = access_policy.start
-            if isinstance(access_policy.start, date):
-                start = _to_utc_datetime(start)
-            ETree.SubElement(policy, 'Start').text = start
-
-        if access_policy.expiry:
-            expiry = access_policy.expiry
-            if isinstance(access_policy.expiry, date):
-                expiry = _to_utc_datetime(expiry)
-            ETree.SubElement(policy, 'Expiry').text = expiry
-
-        if access_policy.permission:
-            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    except:
-        raise
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.Element('StorageServiceProperties')
-
-    # Logging
-    if logging:
-        logging_element = ETree.SubElement(service_properties_element, 'Logging')
-        ETree.SubElement(logging_element, 'Version').text = logging.version
-        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
-        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
-        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
-
-        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
-        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)
-
-    # HourMetrics
-    if hour_metrics:
-        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
-        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)
-
-    # MinuteMetrics
-    if minute_metrics:
-        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
-        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)
-
-    # CORS
-    # Make sure to still serialize an empty list
-    if cors is not None:
-        cors_element = ETree.SubElement(service_properties_element, 'Cors')
-        for rule in cors:
-            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
-            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
-            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
-            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
-            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
-            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
-
-    # Target version
-    if target_version:
-        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
-                                                            method='xml')
-    except:
-        raise
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_metrics_to_xml(metrics, root):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    ETree.SubElement(root, 'Version').text = metrics.version
-
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
-
-    # IncludeAPIs
-    if metrics.enabled and metrics.include_apis is not None:
-        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
-
-    # RetentionPolicy
-    retention_element = ETree.SubElement(root, 'RetentionPolicy')
-    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
-
-
-def _convert_retention_policy_to_xml(retention_policy, root):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
-
-    # Days
-    if retention_policy.enabled and retention_policy.days:
-        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
-
-
-def _len_plus(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            return fstat(fileno).st_size
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
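
_len_plus above probes the body length three ways: len() first, then fstat on a real file descriptor, then a seek/tell round trip. A standalone sketch of the same cascade (with narrower exception handling than the original's bare except):

import io
import os
import tempfile

def length_of(data):
    try:
        return len(data)
    except TypeError:
        pass
    try:
        return os.fstat(data.fileno()).st_size
    except (AttributeError, OSError):  # io.UnsupportedOperation is an OSError
        pass
    # Seekable stream: measure from the current position to the end.
    pos = data.tell()
    data.seek(0, os.SEEK_END)
    length = data.tell() - pos
    data.seek(pos, os.SEEK_SET)
    return length

assert length_of(b'abc') == 3
assert length_of(io.BytesIO(b'abcd')) == 4
with tempfile.TemporaryFile() as f:
    f.write(b'12345')
    f.flush()
    f.seek(0)
    assert length_of(f) == 5
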
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/cloudstorageaccount.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/cloudstorageaccount.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/cloudstorageaccount.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/cloudstorageaccount.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,197 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-# Note that we import BlobService/QueueService/FileService on demand
-# because this module is imported by azure/storage/__init__
-# ie. we don't want 'import azure.storage' to trigger an automatic import
-# of blob/queue/file packages.
-
-from ._error import _validate_not_none
-from .models import (
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-
-
-class CloudStorageAccount(object):
-    """
-    Provides a factory for creating the blob, queue, and file services
-    with a common account name and account key or sas token. Users can either
-    use the factory or construct the appropriate service directly.
-    """
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless is_emulated is used.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.is_emulated = is_emulated
-
-    def create_block_blob_service(self):
-        '''
-        Creates a BlockBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
-        '''
-        try:
-            from ..blob.blockblobservice import BlockBlobService
-            return BlockBlobService(self.account_name, self.account_key,
-                                    sas_token=self.sas_token,
-                                    is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_page_blob_service(self):
-        '''
-        Creates a PageBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
-        '''
-        try:
-            from ..blob.pageblobservice import PageBlobService
-            return PageBlobService(self.account_name, self.account_key,
-                                   sas_token=self.sas_token,
-                                   is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_append_blob_service(self):
-        '''
-        Creates an AppendBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
-        '''
-        try:
-            from ..blob.appendblobservice import AppendBlobService
-            return AppendBlobService(self.account_name, self.account_key,
-                                     sas_token=self.sas_token,
-                                     is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_queue_service(self):
-        '''
-        Creates a QueueService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
-        '''
-        try:
-            from ..queue.queueservice import QueueService
-            return QueueService(self.account_name, self.account_key,
-                                sas_token=self.sas_token,
-                                is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-queue is required. '
-                            + 'Please install it using "pip install azure-storage-queue"')
-
-    def create_file_service(self):
-        '''
-        Creates a FileService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.file.fileservice.FileService`
-        '''
-        try:
-            from ..file.fileservice import FileService
-            return FileService(self.account_name, self.account_key,
-                               sas_token=self.sas_token)
-        except ImportError:
-            raise Exception('The package azure-storage-file is required. '
-                            + 'Please install it using "pip install azure-storage-file"')
-
-    def generate_shared_access_signature(self, services, resource_types,
-                                         permission, expiry, start=None,
-                                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(services, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
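
A brief usage sketch of the factory removed above, with placeholder credentials; the service classes resolve only if the legacy track1 azure-storage-* packages are installed:

    from datetime import datetime, timedelta

    # Placeholder credentials; real values would come from configuration.
    account = CloudStorageAccount(account_name='mystorageaccount',
                                  account_key='<base64-account-key>')

    # Factory method; raises if azure-storage-blob (track1) is not installed.
    blob_service = account.create_block_blob_service()

    # Account-level SAS: blob service only, object-level APIs, read-only, 1 hour.
    sas_token = account.generate_shared_access_signature(
        services=Services(blob=True),
        resource_types=ResourceTypes(object=True),
        permission=AccountPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1))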
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/models.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,623 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info < (3,):
-    from collections import Iterable
-
-    _unicode_type = unicode
-else:
-    from collections.abc import Iterable
-
-    _unicode_type = str
-
-from ._error import (
-    _validate_not_none
-)
-
-
-class _HeaderDict(dict):
-    def __getitem__(self, index):
-        return super(_HeaderDict, self).__getitem__(index.lower())
-
-
-class _list(list):
-    '''Used so that additional properties can be set on the return list'''
-    pass
-
-
-class _dict(dict):
-    '''Used so that additional properties can be set on the return dictionary'''
-    pass
-
-
-class _OperationContext(object):
-    '''
-    Contains information that lasts the lifetime of an operation. This operation 
-    may span multiple calls to the Azure service.
-
-    :ivar bool location_lock: 
-        Whether the location should be locked for this operation.
-    :ivar str location: 
-        The location to lock to.
-    '''
-
-    def __init__(self, location_lock=False):
-        self.location_lock = location_lock
-        self.host_location = None
-
-
-class ListGenerator(Iterable):
-    '''
-    A generator object used to list storage resources. The generator will lazily 
-    follow the continuation tokens returned by the service and stop when all 
-    resources have been returned or max_results is reached.
-
-    If max_results is specified and the account has more than that number of 
-    resources, the generator will have a populated next_marker field once it 
-    finishes. This marker can be used to create a new generator if more 
-    results are desired.
-    '''
-
-    def __init__(self, resources, list_method, list_args, list_kwargs):
-        self.items = resources
-        self.next_marker = resources.next_marker
-
-        self._list_method = list_method
-        self._list_args = list_args
-        self._list_kwargs = list_kwargs
-
-    def __iter__(self):
-        # return results
-        for i in self.items:
-            yield i
-
-        while True:
-            # if no more results on the service, return
-            if not self.next_marker:
-                break
-
-            # update the marker args
-            self._list_kwargs['marker'] = self.next_marker
-
-            # handle max results, if present
-            max_results = self._list_kwargs.get('max_results')
-            if max_results is not None:
-                max_results = max_results - len(self.items)
-
-                # if we've reached max_results, return
-                # else, update the max_results arg
-                if max_results <= 0:
-                    break
-                else:
-                    self._list_kwargs['max_results'] = max_results
-
-            # get the next segment
-            resources = self._list_method(*self._list_args, **self._list_kwargs)
-            self.items = resources
-            self.next_marker = resources.next_marker
-
-            # return results
-            for i in self.items:
-                yield i
-
-
-class RetryContext(object):
-    '''
-    Contains the request and response information that can be used to determine 
-    whether and how to retry. This context is stored across retries and may be 
-    used to store other information relevant to the retry strategy.
-
-    :ivar ~azure.storage.common._http.HTTPRequest request:
-        The request sent to the storage service.
-    :ivar ~azure.storage.common._http.HTTPResponse response:
-        The response returned by the storage service.
-    :ivar LocationMode location_mode:
-        The location the request was sent to.
-    :ivar Exception exception:
-        The exception that just occurred. The type could either be AzureException (for HTTP errors),
-        or other Exception types from lower layers, which are kept unwrapped for easier processing.
-    '''
-
-    def __init__(self):
-        self.request = None
-        self.response = None
-        self.location_mode = None
-        self.exception = None
-
-
-class LocationMode(object):
-    '''
-    Specifies the location the request should be sent to. This mode only applies 
-    for RA-GRS accounts which allow secondary read access. All other account types 
-    must use PRIMARY.
-    '''
-
-    PRIMARY = 'primary'
-    ''' Requests should be sent to the primary location. '''
-
-    SECONDARY = 'secondary'
-    ''' Requests should be sent to the secondary location, if possible. '''
-
-
-class RetentionPolicy(object):
-    '''
-    By default, Storage Analytics will not delete any logging or metrics data. Blobs
-    will continue to be written until the shared 20TB limit is
-    reached. Once the 20TB limit is reached, Storage Analytics will stop writing 
-    new data and will not resume until free space is available. This 20TB limit 
-    is independent of the total limit for your storage account.
-
-    There are two ways to delete Storage Analytics data: by manually making deletion 
-    requests or by setting a data retention policy. Manual requests to delete Storage 
-    Analytics data are billable, but delete requests resulting from a retention policy 
-    are not billable.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled: 
-            Indicates whether a retention policy is enabled for the 
-            storage service. If disabled, logging and metrics data will be retained 
-            infinitely by the service unless explicitly deleted.
-        :param int days: 
-            Required if enabled is true. Indicates the number of 
-            days that metrics or logging data should be retained. All data older 
-            than this value will be deleted. The minimum value you can specify is 1; 
-            the largest value is 365 (one year).
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class Logging(object):
-    '''
-    Storage Analytics logs detailed information about successful and failed requests 
-    to a storage service. This information can be used to monitor individual requests 
-    and to diagnose issues with a storage service. Requests are logged on a best-effort 
-    basis.
-
-    All logs are stored in block blobs in a container named $logs, which is
-    automatically created when Storage Analytics is enabled for a storage account. 
-    The $logs container is located in the blob namespace of the storage account. 
-    This container cannot be deleted once Storage Analytics has been enabled, though 
-    its contents can be deleted.
-
-    For more information, see  https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
-    '''
-
-    def __init__(self, delete=False, read=False, write=False,
-                 retention_policy=None):
-        '''
-        :param bool delete: 
-            Indicates whether all delete requests should be logged.
-        :param bool read: 
-            Indicates whether all read requests should be logged.
-        :param bool write: 
-            Indicates whether all write requests should be logged.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("read", read)
-        _validate_not_none("write", write)
-        _validate_not_none("delete", delete)
-
-        self.version = u'1.0'
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class Metrics(object):
-    '''
-    Metrics include aggregated transaction statistics and capacity data about requests 
-    to a storage service. Transactions are reported at both the API operation level 
-    as well as at the storage service level, and capacity is reported at the storage 
-    service level. Metrics data can be used to analyze storage service usage, diagnose 
-    issues with requests made against the storage service, and to improve the 
-    performance of applications that use a service.
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
-    '''
-
-    def __init__(self, enabled=False, include_apis=None,
-                 retention_policy=None):
-        '''
-        :param bool enabled: 
-            Indicates whether metrics are enabled for 
-            the service.
-        :param bool include_apis: 
-            Required if enabled is True. Indicates whether metrics 
-            should generate summary statistics for called API operations.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("include_apis", include_apis)
-
-        self.version = u'1.0'
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class CorsRule(object):
-    '''
-    CORS is an HTTP feature that enables a web application running under one domain 
-    to access resources in another domain. Web browsers implement a security 
-    restriction known as same-origin policy that prevents a web page from calling 
-    APIs in a different domain; CORS provides a secure way to allow one domain 
-    (the origin domain) to call APIs in another domain. 
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
-                 exposed_headers=None, allowed_headers=None):
-        '''
-        :param allowed_origins: 
-            A list of origin domains that will be allowed via CORS, or "*" to allow 
-            all domains. The list must contain at least one entry. Limited to 64 
-            origin domains. Each allowed origin can have up to 256 characters.
-        :type allowed_origins: list(str)
-        :param allowed_methods:
-            A list of HTTP methods that are allowed to be executed by the origin. 
-            The list must contain at least one entry. For Azure Storage, 
-            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-        :type allowed_methods: list(str)
-        :param int max_age_in_seconds:
-            The number of seconds that the client/browser should cache a 
-            preflight response.
-        :param exposed_headers:
-            Defaults to an empty list. A list of response headers to expose to CORS 
-            clients. Limited to 64 defined headers and two prefixed headers. Each 
-            header can be up to 256 characters.
-        :type exposed_headers: list(str)
-        :param allowed_headers:
-            Defaults to an empty list. A list of headers allowed to be part of 
-            the cross-origin request. Limited to 64 defined headers and 2 prefixed 
-            headers. Each header can be up to 256 characters.
-        :type allowed_headers: list(str)
-        '''
-        _validate_not_none("allowed_origins", allowed_origins)
-        _validate_not_none("allowed_methods", allowed_methods)
-        _validate_not_none("max_age_in_seconds", max_age_in_seconds)
-
-        self.allowed_origins = allowed_origins if allowed_origins else list()
-        self.allowed_methods = allowed_methods if allowed_methods else list()
-        self.max_age_in_seconds = max_age_in_seconds
-        self.exposed_headers = exposed_headers if exposed_headers else list()
-        self.allowed_headers = allowed_headers if allowed_headers else list()
-
-
-class ServiceProperties(object):
-    ''' 
-    Returned by get_*_service_properties functions. Contains the properties of a 
-    storage service, including Analytics and CORS rules.
-
-    Azure Storage Analytics performs logging and provides metrics data for a storage 
-    account. You can use this data to trace requests, analyze usage trends, and 
-    diagnose issues with your storage account. To use Storage Analytics, you must 
-    enable it individually for each service you want to monitor.
-
-    The aggregated data is stored in a well-known blob (for logging) and in well-known 
-    tables (for metrics), which may be accessed using the Blob service and Table 
-    service APIs.
-
-    For an in-depth guide on using Storage Analytics and other tools to identify, 
-    diagnose, and troubleshoot Azure Storage-related issues, see 
-    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
-
-    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    pass
-
-
-class ServiceStats(object):
-    ''' 
-    Returned by get_*_service_stats functions. Contains statistics related to 
-    replication for the given service. It is only available when read-access 
-    geo-redundant replication is enabled for the storage account.
-
-    :ivar GeoReplication geo_replication:
-        An object containing statistics related to replication for the given service.
-    '''
-    pass
-
-
-class GeoReplication(object):
-    ''' 
-    Contains statistics related to replication for the given service.
-
-    :ivar str status:
-        The status of the secondary location. Possible values are:
-            live: Indicates that the secondary location is active and operational.
-            bootstrap: Indicates initial synchronization from the primary location 
-                to the secondary location is in progress. This typically occurs 
-                when replication is first enabled.
-            unavailable: Indicates that the secondary location is temporarily 
-                unavailable.
-    :ivar date last_sync_time:
-        A GMT date value, to the second. All primary writes preceding this value 
-        are guaranteed to be available for read operations at the secondary. 
-        Primary writes after this point in time may or may not be available for 
-        reads. The value may be empty if LastSyncTime is not available. This can 
-        happen if the replication status is bootstrap or unavailable. Although 
-        geo-replication is continuously enabled, the LastSyncTime result may 
-        reflect a cached value from the service that is refreshed every few minutes.
-    '''
-    pass
-
-
-class AccessPolicy(object):
-    '''
-    Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and 
-    permissions for the Shared Access Signatures with which it's associated. 
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit 
-    them from the URL for the Shared Access Signature. Doing so permits you to 
-    modify the associated signature's behavior at any time, as well as to revoke 
-    it. Or you can specify one or more of the access policy parameters within 
-    the stored access policy, and the others on the URL. Finally, you can 
-    specify all of the parameters on the URL. In this case, you can use the 
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must 
-    include all fields required to authenticate the signature. If any required 
-    fields are missing, the request will fail. Likewise, if a field is specified 
-    both in the Shared Access Signature URL and in the stored access policy, the 
-    request will fail with status code 400 (Bad Request).
-    '''
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        '''
-        :param str permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        '''
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class Protocol(object):
-    '''
-    Specifies the protocol permitted for a SAS token. Note that HTTP only is 
-    not allowed.
-    '''
-
-    HTTPS = 'https'
-    ''' Allow HTTPS requests only. '''
-
-    HTTPS_HTTP = 'https,http'
-    ''' Allow HTTP and HTTPS requests. '''
-
-
-class ResourceTypes(object):
-    '''
-    Specifies the resource types that are accessible with the account SAS.
-
-    :ivar ResourceTypes ResourceTypes.CONTAINER:
-        Access to container-level APIs (e.g., Create/Delete Container, 
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories) 
-    :ivar ResourceTypes ResourceTypes.OBJECT:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
-    :ivar ResourceTypes ResourceTypes.SERVICE:
-        Access to service-level APIs (e.g., Get/Set Service Properties, 
-        Get Service Stats, List Containers/Queues/Shares)
-    '''
-
-    def __init__(self, service=False, container=False, object=False, _str=None):
-        '''
-        :param bool service:
-            Access to service-level APIs (e.g., Get/Set Service Properties, 
-            Get Service Stats, List Containers/Queues/Shares)
-        :param bool container:
-            Access to container-level APIs (e.g., Create/Delete Container, 
-            Create/Delete Queue, Create/Delete Share,
-            List Blobs/Files and Directories) 
-        :param bool object:
-            Access to object-level APIs for blobs, queue messages, and
-            files (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
-        :param str _str: 
-            A string representing the resource types.
-        '''
-        if not _str:
-            _str = ''
-        self.service = service or ('s' in _str)
-        self.container = container or ('c' in _str)
-        self.object = object or ('o' in _str)
-
-    def __or__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-
-ResourceTypes.SERVICE = ResourceTypes(service=True)
-ResourceTypes.CONTAINER = ResourceTypes(container=True)
-ResourceTypes.OBJECT = ResourceTypes(object=True)
-
-
-class Services(object):
-    '''
-    Specifies the services accessible with the account SAS.
-
-    :ivar Services Services.BLOB: The blob service.
-    :ivar Services Services.FILE: The file service.
-    :ivar Services Services.QUEUE: The queue service.
-    '''
-
-    def __init__(self, blob=False, queue=False, file=False, _str=None):
-        '''
-        :param bool blob:
-            Access to any blob service, for example, the `.BlockBlobService`
-        :param bool queue:
-            Access to the `.QueueService`
-        :param bool file:
-            Access to the `.FileService`
-        :param str _str: 
-            A string representing the services.
-        '''
-        if not _str:
-            _str = ''
-        self.blob = blob or ('b' in _str)
-        self.queue = queue or ('q' in _str)
-        self.file = file or ('f' in _str)
-
-    def __or__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('f' if self.file else ''))
-
-
-Services.BLOB = Services(blob=True)
-Services.QUEUE = Services(queue=True)
-Services.FILE = Services(file=True)
-
-
-class AccountPermissions(object):
-    '''
-    :class:`~AccountPermissions` class to be used with the generate_shared_access_signature
-    method and for the AccessPolicies used with set_*_acl. There are two types of
-    SAS which may be used to grant resource access. One is to grant access to a
-    specific resource (resource-specific). Another is to grant access to the
-    entire service for a specific account and allow certain operations based on
-    permissions found here.
-
-    :ivar AccountPermissions AccountPermissions.ADD:
-        Valid for the following Object resource types only: queue messages and append blobs.
-    :ivar AccountPermissions AccountPermissions.CREATE:
-        Valid for the following Object resource types only: blobs and files. Users 
-        can create new blobs or files, but may not overwrite existing blobs or files. 
-    :ivar AccountPermissions AccountPermissions.DELETE:
-        Valid for Container and Object resource types, except for queue messages. 
-    :ivar AccountPermissions AccountPermissions.LIST:
-        Valid for Service and Container resource types only. 
-    :ivar AccountPermissions AccountPermissions.PROCESS:
-        Valid for the following Object resource type only: queue messages. 
-    :ivar AccountPermissions AccountPermissions.READ:
-        Valid for all signed resources types (Service, Container, and Object). 
-        Permits read permissions to the specified resource type. 
-    :ivar AccountPermissions AccountPermissions.UPDATE:
-        Valid for the following Object resource types only: queue messages.
-    :ivar AccountPermissions AccountPermissions.WRITE:
-        Valid for all signed resources types (Service, Container, and Object). 
-        Permits write permissions to the specified resource type. 
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 add=False, create=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Valid for all signed resources types (Service, Container, and Object). 
-            Permits read permissions to the specified resource type.
-        :param bool write:
-            Valid for all signed resources types (Service, Container, and Object). 
-            Permits write permissions to the specified resource type.
-        :param bool delete: 
-            Valid for Container and Object resource types, except for queue messages.
-        :param bool list:
-            Valid for Service and Container resource types only.
-        :param bool add:
-            Valid for the following Object resource types only: queue messages, and append blobs.
-        :param bool create:
-            Valid for the following Object resource types only: blobs and files. 
-            Users can create new blobs or files, but may not overwrite existing 
-            blobs or files.
-        :param bool update:
-            Valid for the following Object resource types only: queue messages.
-        :param bool process:
-            Valid for the following Object resource type only: queue messages.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-AccountPermissions.READ = AccountPermissions(read=True)
-AccountPermissions.WRITE = AccountPermissions(write=True)
-AccountPermissions.DELETE = AccountPermissions(delete=True)
-AccountPermissions.LIST = AccountPermissions(list=True)
-AccountPermissions.ADD = AccountPermissions(add=True)
-AccountPermissions.CREATE = AccountPermissions(create=True)
-AccountPermissions.UPDATE = AccountPermissions(update=True)
-AccountPermissions.PROCESS = AccountPermissions(process=True)
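
The three flag classes above compose with | or + by concatenating their string forms, and the _str parameter parses such a string back, so combined values round-trip; a small sketch:

    perms = AccountPermissions.READ | AccountPermissions.LIST    # 'rl'
    services = Services.BLOB | Services.QUEUE                    # 'bq'
    rtypes = ResourceTypes(container=True, object=True)          # 'co'

    assert str(perms) == 'rl' and perms.read and perms.list
    assert str(Services(_str=str(services))) == 'bq'   # parsed back from the string
    assert str(rtypes) == 'co'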
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/retry.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/retry.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/retry.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/retry.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,285 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from abc import ABCMeta
-from math import pow
-import random
-
-from .models import LocationMode
-
-
-class _Retry(object):
-    '''
-    The base class for Exponential and Linear retries containing shared code.
-    '''
-    __metaclass__ = ABCMeta
-
-    def __init__(self, max_attempts, retry_to_secondary):
-        '''
-        Constructs a base retry object.
-
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        '''
-        self.max_attempts = max_attempts
-        self.retry_to_secondary = retry_to_secondary
-
-    def _should_retry(self, context):
-        '''
-        A function which determines whether or not to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            A boolean indicating whether or not to retry the request.
-        :rtype: bool
-        '''
-        # If max attempts are reached, do not retry.
-        if context.count >= self.max_attempts:
-            return False
-
-        status = None
-        if context.response and context.response.status:
-            status = context.response.status
-
-        if status is None:
-            '''
-            If status is None, retry as this request triggered an exception. For 
-            example, network issues would trigger this.
-            '''
-            return True
-        elif 200 <= status < 300:
-            '''
-            This method is called after a successful response, meaning we failed 
-            during the response body download or parsing. So, success codes should 
-            be retried.
-            '''
-            return True
-        elif 300 <= status < 500:
-            '''
-            An exception occurred, but in most cases it was expected. Examples could
-            include a 409 Conflict or 412 Precondition Failed.
-            '''
-            if status == 404 and context.location_mode == LocationMode.SECONDARY:
-                # Response code 404 should be retried if secondary was used.
-                return True
-            if status == 408:
-                # Response code 408 is a timeout and should be retried.
-                return True
-            return False
-        elif status >= 500:
-            '''
-            Response codes above 500 with the exception of 501 Not Implemented and 
-            505 Version Not Supported indicate a server issue and should be retried.
-            '''
-            if status == 501 or status == 505:
-                return False
-            return True
-        else:
-            # If something else happened, it's unexpected. Retry.
-            return True
-
-    def _set_next_host_location(self, context):
-        '''
-        A function which sets the next host location on the request, if applicable. 
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context containing the previous host location and the request 
-            to evaluate and possibly modify.
-        '''
-        if len(context.request.host_locations) > 1:
-            # If there's more than one possible location, retry to the alternative
-            if context.location_mode == LocationMode.PRIMARY:
-                context.location_mode = LocationMode.SECONDARY
-            else:
-                context.location_mode = LocationMode.PRIMARY
-
-            context.request.host = context.request.host_locations.get(context.location_mode)
-
-    def _retry(self, context, backoff):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :param function() backoff:
-            A function which returns the backoff time if a retry is to be performed.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        # If the context does not contain a count parameter, this request has not 
-        # been retried yet. Add the count parameter to track the number of retries.
-        if not hasattr(context, 'count'):
-            context.count = 0
-
-        # Determine whether to retry, and if so increment the count, modify the 
-        # request as desired, and return the backoff.
-        if self._should_retry(context):
-            backoff_interval = backoff(context)
-            context.count += 1
-
-            # If retry to secondary is enabled, attempt to change the host if the 
-            # request allows it
-            if self.retry_to_secondary:
-                self._set_next_host_location(context)
-
-            return backoff_interval
-
-        return None
-
-
-class ExponentialRetry(_Retry):
-    '''
-    Exponential retry.
-    '''
-
-    def __init__(self, initial_backoff=15, increment_power=3, max_attempts=3,
-                 retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for 
-        the first retry. Subsequent retries are retried after initial_backoff + 
-        increment_power^retry_count seconds. For example, by default the first retry 
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the 
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff: 
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_power:
-            The base, in seconds, to increment the initial_backoff by after the 
-            first retry.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_power = increment_power
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_power, context.count))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(_Retry):
-    '''
-    Linear retry.
-    '''
-
-    def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs a Linear retry object.
-
-        :param int backoff: 
-            The backoff interval, in seconds, between retries.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        '''
-        self.backoff = backoff
-        self.max_attempts = max_attempts
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
-        self.random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(self.random_range_start, self.random_range_end)
-
-
-def no_retry(context):
-    '''
-    Specifies never to retry.
-
-    :param ~azure.storage.models.RetryContext context: 
-        The retry context.
-    :return: 
-        Always returns None to indicate never to retry.
-    :rtype: None
-    '''
-    return None
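
With the defaults documented above, ExponentialRetry waits roughly 15, 18 and 24 seconds (15 + 3**count) before successive retries, each jittered by +/- random_jitter_range. A quick check of _backoff; the _Ctx stand-in below mimics only the count attribute of RetryContext:

    retry_policy = ExponentialRetry(initial_backoff=15, increment_power=3,
                                    max_attempts=3, random_jitter_range=3)

    class _Ctx(object):   # minimal stand-in for RetryContext
        count = 0

    ctx = _Ctx()
    for expected in (15, 18, 24):            # 15, 15 + 3**1, 15 + 3**2
        interval = retry_policy._backoff(ctx)
        assert expected - 3 <= interval <= expected + 3
        ctx.count += 1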
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,226 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from datetime import date
-
-from ._common_conversion import (
-    _sign_string,
-    _to_str,
-)
-from ._constants import DEFAULT_X_MS_VERSION
-from ._serialization import (
-    url_quote,
-    _to_utc_datetime,
-)
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account access
-    signature tokens with an account name and account key. Users can either
-    use the factory or construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _to_str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(_QueryStringConstants.SIGNED_START, start)
-        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, id):
-        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_resource_signature(self, account_name, account_key, service, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        if service == 'blob' or service == 'file':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
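
_sign_string itself lives in _common_conversion and does not appear in this diff; for Azure Storage SAS tokens it computes an HMAC-SHA256 over the UTF-8 string-to-sign, keyed with the base64-decoded account key, and base64-encodes the digest. A standalone sketch under that assumption (demo key only):

    import base64
    import hashlib
    import hmac

    def sign_string(account_key, string_to_sign):
        # HMAC-SHA256 keyed with the decoded account key, base64-encoded result.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'),
                          hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Account SAS string-to-sign: account name, then sp, ss, srt, st, se,
    # sip, spr, sv, one value per line; omitted values remain blank lines.
    demo_key = base64.b64encode(b'not-a-real-key').decode('utf-8')
    string_to_sign = ('myaccount\nrl\nbq\nco\n\n'
                      '2025-01-01T00:00:00Z\n\nhttps\n2017-04-17\n')
    print(sign_string(demo_key, string_to_sign))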
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/storageclient.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/storageclient.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/common/storageclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/common/storageclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,372 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import sys
-from abc import ABCMeta
-import logging
-
-logger = logging.getLogger(__name__)
-from time import sleep
-
-import requests
-from azure.common import (
-    AzureException,
-)
-
-from ._constants import (
-    DEFAULT_SOCKET_TIMEOUT,
-    DEFAULT_X_MS_VERSION,
-    DEFAULT_USER_AGENT_STRING,
-    USER_AGENT_STRING_PREFIX,
-    USER_AGENT_STRING_SUFFIX,
-)
-from ._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _http_error_handler,
-)
-from ._http import HTTPError
-from ._http.httpclient import _HTTPClient
-from ._serialization import (
-    _update_request,
-    _add_date_header,
-)
-from .models import (
-    RetryContext,
-    LocationMode,
-    _OperationContext,
-)
-from .retry import ExponentialRetry
-
-
-class StorageClient(object):
-    '''
-    This is the base class for service objects. Service objects are used to make
-    all requests to Storage. This class cannot be instantiated directly.
-
-    :ivar str account_name:
-        The storage account name. This is used to authenticate requests 
-        signed with an account key and to construct the storage endpoint. It 
-        is required unless a connection string is given, or if a custom 
-        domain is used with anonymous authentication.
-    :ivar str account_key:
-        The storage account key. This is used for shared key authentication. 
-        If neither account key nor sas token is specified, anonymous access 
-        will be used.
-    :ivar str sas_token:
-        A shared access signature token to use to authenticate requests 
-        instead of the account key. If account key and sas token are both 
-        specified, the account key will be used to sign. If neither is 
-        specified, anonymous access will be used.
-    :ivar str primary_endpoint:
-        The endpoint to send storage requests to.
-    :ivar str secondary_endpoint:
-        The secondary endpoint to read storage data from. This will only be a 
-        valid endpoint if the storage account used is RA-GRS and thus allows 
-        reading from secondary.
-    :ivar function(context) retry:
-        A function which determines whether to retry. Takes as a parameter a 
-        :class:`~azure.storage.common.models.RetryContext` object. Returns the number
-        of seconds to wait before retrying the request, or None to indicate not 
-        to retry.
-    :ivar ~azure.storage.common.models.LocationMode location_mode:
-        The host location to use to make requests. Defaults to LocationMode.PRIMARY.
-        Note that this setting only applies to RA-GRS accounts as other account 
-        types do not allow reading from secondary. If the location_mode is set to 
-        LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. 
-        Write requests will continue to be sent to primary.
-    :ivar str protocol:
-        The protocol to use for requests. Defaults to https.
-    :ivar requests.Session request_session:
-        The session object to use for http requests.
-    :ivar function(request) request_callback:
-        A function called immediately before each request is sent. This function 
-        takes as a parameter the request object and returns nothing. It may be 
-        used to add custom headers or log request data.
-    :ivar function(response) response_callback:
-        A function called immediately after each response is received. This 
-        function takes as a parameter the response object and returns nothing. 
-        It may be used to log response data.
-    :ivar function(context) retry_callback:
-        A function called immediately after retry evaluation is performed. This 
-        function takes as a parameter the retry context object and returns nothing. 
-        It may be used to detect retries and log context information.
-    '''
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, connection_params):
-        '''
-        :param obj connection_params: The parameters to use to construct the client.
-        '''
-        self.account_name = connection_params.account_name
-        self.account_key = connection_params.account_key
-        self.sas_token = connection_params.sas_token
-        self.is_emulated = connection_params.is_emulated
-
-        self.primary_endpoint = connection_params.primary_endpoint
-        self.secondary_endpoint = connection_params.secondary_endpoint
-
-        protocol = connection_params.protocol
-        request_session = connection_params.request_session or requests.Session()
-        socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
-        self._httpclient = _HTTPClient(
-            protocol=protocol,
-            session=request_session,
-            timeout=socket_timeout,
-        )
-
-        self.retry = ExponentialRetry().retry
-        self.location_mode = LocationMode.PRIMARY
-
-        self.request_callback = None
-        self.response_callback = None
-        self.retry_callback = None
-        self._X_MS_VERSION = DEFAULT_X_MS_VERSION
-        self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING
-
-    def _update_user_agent_string(self, service_package_version):
-        self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX,
-                                                   service_package_version,
-                                                   USER_AGENT_STRING_SUFFIX)
-
-    @property
-    def socket_timeout(self):
-        return self._httpclient.timeout
-
-    @socket_timeout.setter
-    def socket_timeout(self, value):
-        self._httpclient.timeout = value
-
-    @property
-    def protocol(self):
-        return self._httpclient.protocol
-
-    @protocol.setter
-    def protocol(self, value):
-        self._httpclient.protocol = value
-
-    @property
-    def request_session(self):
-        return self._httpclient.session
-
-    @request_session.setter
-    def request_session(self, value):
-        self._httpclient.session = value
-
-    def set_proxy(self, host, port, user=None, password=None):
-        '''
-        Sets the proxy server host and port for HTTP CONNECT tunneling.
-
-        :param str host: Address of the proxy. Ex: '192.168.0.100'
-        :param int port: Port of the proxy. Ex: 6000
-        :param str user: User for proxy authorization.
-        :param str password: Password for proxy authorization.
-        '''
-        self._httpclient.set_proxy(host, port, user, password)
-
-    def _get_host_locations(self, primary=True, secondary=False):
-        locations = {}
-        if primary:
-            locations[LocationMode.PRIMARY] = self.primary_endpoint
-        if secondary:
-            locations[LocationMode.SECONDARY] = self.secondary_endpoint
-        return locations
-
-    def _apply_host(self, request, operation_context, retry_context):
-        if operation_context.location_lock and operation_context.host_location:
-            # If this is a location locked operation and the location is set, 
-            # override the request location and host_location.
-            request.host_locations = operation_context.host_location
-            request.host = list(operation_context.host_location.values())[0]
-            retry_context.location_mode = list(operation_context.host_location.keys())[0]
-        elif len(request.host_locations) == 1:
-            # If only one location is allowed, use that location.
-            request.host = list(request.host_locations.values())[0]
-            retry_context.location_mode = list(request.host_locations.keys())[0]
-        else:
-            # If multiple locations are possible, choose based on the location mode.
-            request.host = request.host_locations.get(self.location_mode)
-            retry_context.location_mode = self.location_mode
-
-    @staticmethod
-    def extract_date_and_request_id(retry_context):
-        if getattr(retry_context, 'response', None) is None:
-            return ""
-        resp = retry_context.response
-
-        if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
-                              resp.headers['date'], resp.headers['x-ms-request-id'])
-        elif 'date' in resp.headers:
-            return str.format("Server-Timestamp={0}", resp.headers['date'])
-        elif 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
-        else:
-            return ""
-
-    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
-        '''
-        Sends the request and returns the response. Catches HTTPError and hands it
-        to the error handler.
-        '''
-        operation_context = operation_context or _OperationContext()
-        retry_context = RetryContext()
-
-        # Apply the appropriate host based on the location mode
-        self._apply_host(request, operation_context, retry_context)
-
-        # Apply common settings to the request
-        _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
-        client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
-
-        while True:
-            try:
-                try:
-                    # Execute the request callback 
-                    if self.request_callback:
-                        self.request_callback(request)
-
-                    # Add date and auth after the callback so date doesn't get too old and 
-                    # authentication is still correct if signed headers are added in the request 
-                    # callback. This also ensures retry policies with long back offs 
-                    # will work as it resets the time sensitive headers.
-                    _add_date_header(request)
-                    self.authentication.sign_request(request)
-
-                    # Set the request context
-                    retry_context.request = request
-
-                    # Log the request before it goes out
-                    logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                request.method,
-                                request.path,
-                                request.query,
-                                str(request.headers).replace('\n', ''))
-
-                    # Perform the request
-                    response = self._httpclient.perform_request(request)
-
-                    # Execute the response callback
-                    if self.response_callback:
-                        self.response_callback(response)
-
-                    # Set the response context
-                    retry_context.response = response
-
-                    # Log the response when it comes back
-                    logger.info("%s Receiving Response: "
-                                "%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                self.extract_date_and_request_id(retry_context),
-                                response.status,
-                                response.message,
-                                str(request.headers).replace('\n', ''))
-
-                    # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
-                    if response.status >= 300:
-                        # This exception will be caught by the general error handler
-                        # and raised as an azure http exception
-                        _http_error_handler(
-                            HTTPError(response.status, response.message, response.headers, response.body))
-
-                    # Parse the response
-                    if parser:
-                        if parser_args:
-                            args = [response]
-                            args.extend(parser_args)
-                            return parser(*args)
-                        else:
-                            return parser(response)
-                    else:
-                        return
-                except AzureException as ex:
-                    retry_context.exception = ex
-                    raise ex
-                except Exception as ex:
-                    retry_context.exception = ex
-                    if sys.version_info >= (3,):
-                        # Automatic chaining in Python 3 means we keep the trace
-                        raise AzureException(ex.args[0])
-                    else:
-                        # There isn't a good solution in Python 2 for keeping the stack trace
-                        # in general that will not result in an error in Python 3.
-                        # However, we can keep the previous error type and message
-                        # TODO: In the future we will log the trace
-                        msg = ""
-                        if len(ex.args) > 0:
-                            msg = ex.args[0]
-                        raise AzureException('{}: {}'.format(ex.__class__.__name__, msg))
-
-            except AzureException as ex:
-                # only parse the strings used for logging if logging is at least enabled for CRITICAL;
-                # default the values so the logging calls below never reference an unbound name
-                exception_str_in_one_line = ''
-                status_code = 'Unknown'
-                timestamp_and_request_id = ''
-                if logger.isEnabledFor(logging.CRITICAL):
-                    exception_str_in_one_line = str(ex).replace('\n', '')
-                    status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
-                    timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
-
-                logger.info("%s Operation failed: checking if the operation should be retried. "
-                            "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
-                            client_request_id_prefix,
-                            retry_context.count if hasattr(retry_context, 'count') else 0,
-                            timestamp_and_request_id,
-                            status_code,
-                            exception_str_in_one_line)
-
-                # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
-                # will not be resolved with retries.
-                if str(ex) == _ERROR_DECRYPTION_FAILURE:
-                    logger.error("%s Encountered decryption failure: this cannot be retried. "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-
-                # Determine whether a retry should be performed and if so, how 
-                # long to wait before performing retry.
-                retry_interval = self.retry(retry_context)
-                if retry_interval is not None:
-                    # Execute the callback
-                    if self.retry_callback:
-                        self.retry_callback(retry_context)
-
-                    logger.info(
-                        "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
-                        client_request_id_prefix,
-                        retry_context.count,
-                        retry_interval)
-
-                    # Sleep for the desired retry interval
-                    sleep(retry_interval)
-                else:
-                    logger.error("%s Retry policy did not allow for a retry: "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-            finally:
-                # If this is a location locked operation and the location is not set, 
-                # this is the first request of that operation. Set the location to 
-                # be used for subsequent requests in the operation.
-                if operation_context.location_lock and not operation_context.host_location:
-                    operation_context.host_location = {retry_context.location_mode: request.host}
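
The _perform_request loop above delegates back-off decisions to self.retry, a
function that receives a RetryContext and returns either the number of seconds
to wait or None to stop retrying. A minimal sketch of that contract, with a
simplified exponential policy standing in for ExponentialRetry (the names and
constants below are illustrative, not the package's):

    import random
    import time

    class RetryContext(object):
        def __init__(self):
            self.count = 0

    def exponential_retry(context, initial_backoff=15, increment_base=3,
                          max_attempts=3):
        # Seconds to wait before the next attempt, or None to give up.
        if context.count >= max_attempts:
            return None
        return (initial_backoff + increment_base ** context.count) * random.uniform(0.8, 1.2)

    def perform_with_retry(send, retry=exponential_retry):
        context = RetryContext()
        while True:
            try:
                return send()
            except Exception:
                context.count += 1
                interval = retry(context)
                if interval is None:
                    raise
                time.sleep(interval)
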
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,29 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from .fileservice import FileService
-from .models import (
-    Share,
-    ShareProperties,
-    File,
-    FileProperties,
-    Directory,
-    DirectoryProperties,
-    FileRange,
-    ContentSettings,
-    CopyProperties,
-    SharePermissions,
-    FilePermissions,
-    DeleteSnapshot,
-)
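
These were the public entry points of the track1 file package removed in
1.5.0. Code still pinned to 1.4.0 would typically consume them along these
lines (account name and key are placeholders):

    from azure.multiapi.storage.v2017_04_17.file import FileService

    service = FileService(account_name='myaccount', account_key='<base64-key>')
    for share in service.list_shares():
        print(share.name, share.properties.quota)
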
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.36.0'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-04-17'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,250 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .models import (
-    Share,
-    Directory,
-    File,
-    FileProperties,
-    FileRange,
-    ShareProperties,
-    DirectoryProperties,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _parse_metadata,
-)
-from ..common._error import _validate_content_match
-from ..common._common_conversion import (
-    _get_content_md5,
-    _to_str,
-)
-
-def _parse_snapshot_share(response, name):
-    '''
-    Extracts the x-ms-snapshot header from the response and parses the share.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_share(response, name, snapshot)
-
-def _parse_share(response, name, snapshot=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ShareProperties)
-    return Share(name, props, metadata, snapshot)
-
-
-def _parse_directory(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, DirectoryProperties)
-    return Directory(name, props, metadata)
-
-
-def _parse_file(response, name, validate_content=False):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, FileProperties)
-
-    # For range gets, only look at 'x-ms-content-md5' for overall MD5
-    content_settings = getattr(props, 'content_settings')
-    if 'content-range' in response.headers:
-        if 'x-ms-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    return File(name, response.body, props, metadata)
-
-
-def _convert_xml_to_shares(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults AccountName="https://myaccount.file.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Shares>
-        <Share>
-          <Name>share-name</Name>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Quota>max-share-size</Quota>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Share>
-      </Shares>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    shares = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(shares, 'next_marker', next_marker)
-
-    shares_element = list_element.find('Shares')
-
-    for share_element in shares_element.findall('Share'):
-        # Name element
-        share = Share()
-        share.name = share_element.findtext('Name')
-
-        # Snapshot
-        share.snapshot = share_element.findtext('Snapshot')
-
-        # Metadata
-        metadata_root_element = share_element.find('Metadata')
-        if metadata_root_element is not None:
-            share.metadata = dict()
-            for metadata_element in metadata_root_element:
-                share.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = share_element.find('Properties')
-        share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        share.properties.etag = properties_element.findtext('Etag')
-        share.properties.quota = int(properties_element.findtext('Quota'))
-
-        # Add share to list
-        shares.append(share)
-
-    return shares
-
-
-def _convert_xml_to_directories_and_files(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Entries>
-        <File>
-          <Name>file-name</Name>
-          <Properties>
-            <Content-Length>size-in-bytes</Content-Length>
-          </Properties>
-        </File>
-        <Directory>
-          <Name>directory-name</Name>
-        </Directory>
-      </Entries>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    entries = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(entries, 'next_marker', next_marker)
-
-    entries_element = list_element.find('Entries')
-
-    for file_element in entries_element.findall('File'):
-        # Name element
-        file = File()
-        file.name = file_element.findtext('Name')
-
-        # Properties
-        properties_element = file_element.find('Properties')
-        file.properties.content_length = int(properties_element.findtext('Content-Length'))
-
-        # Add file to list
-        entries.append(file)
-
-    for directory_element in entries_element.findall('Directory'):
-        # Name element
-        directory = Directory()
-        directory.name = directory_element.findtext('Name')
-
-        # Add directory to list
-        entries.append(directory)
-
-    return entries
-
-
-def _convert_xml_to_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <Ranges>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-    </Ranges>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    ranges = list()
-    ranges_element = ETree.fromstring(response.body)
-
-    for range_element in ranges_element.findall('Range'):
-        # Parse range
-        range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
-
-        # Add range to list
-        ranges.append(range)
-
-    return ranges
-
-
-def _convert_xml_to_share_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <ShareStats>
-       <ShareUsage>15</ShareUsage>
-    </ShareStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    share_stats_element = ETree.fromstring(response.body)
-    return int(share_stats_element.findtext('ShareUsage'))
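
The parsers above share one ElementTree pattern, illustrated by the XML
samples in their docstrings: parse response.body, look up child elements by
tag, and coerce the text to the expected type. A self-contained sketch against
the Ranges document (the sample XML is invented for illustration):

    from xml.etree import ElementTree as ETree

    sample = (b'<?xml version="1.0" encoding="utf-8"?>'
              b'<Ranges>'
              b'<Range><Start>0</Start><End>511</End></Range>'
              b'<Range><Start>1024</Start><End>1535</End></Range>'
              b'</Ranges>')

    root = ETree.fromstring(sample)
    ranges = [(int(r.findtext('Start')), int(r.findtext('End')))
              for r in root.findall('Range')]
    print(ranges)  # [(0, 511), (1024, 1535)]
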
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,116 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import threading
-
-from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-
-
-def _download_file_chunks(file_service, share_name, directory_name, file_name,
-                          download_size, block_size, progress, start_range, end_range, 
-                          stream, max_connections, progress_callback, validate_content, 
-                          timeout, operation_context, snapshot):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('file'))
-
-    downloader = _FileChunkDownloader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        timeout,
-        operation_context,
-        snapshot,
-    )
-
-    import concurrent.futures
-    executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-    result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-
-class _FileChunkDownloader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name, 
-                 download_size, chunk_size, progress, start_range, end_range, 
-                 stream, progress_callback, validate_content, timeout, operation_context, snapshot):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.chunk_size = chunk_size
-
-        self.download_size = download_size
-        self.start_index = start_range
-        self.file_end = end_range
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-        self.progress_lock = threading.Lock()
-        self.validate_content = validate_content
-        self.timeout = timeout
-        self.operation_context = operation_context
-        self.snapshot = snapshot
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.file_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.file_end:
-            chunk_end = self.file_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        return self.file_service._get_file(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            timeout=self.timeout,
-            _context=self.operation_context,
-            snapshot=self.snapshot
-        )
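
The downloader above walks the requested byte range in chunk_size steps and
hands each offset to a thread pool; because every worker seeks to
stream_start + (chunk_start - start_index) before writing, out-of-order
completions still land at the right position. A minimal sketch of the same
fan-out, independent of the service client (fetch_chunk is a hypothetical
callable, not part of this package):

    import concurrent.futures

    def chunk_offsets(start, end, chunk_size):
        # Start offset of every chunk in [start, end).
        index = start
        while index < end:
            yield index
            index += chunk_size

    def download_all(fetch_chunk, start, end, chunk_size, max_connections):
        # fetch_chunk(offset, limit) returns the bytes for [offset, limit).
        # executor.map preserves input order, so the chunks join correctly.
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            return b''.join(executor.map(
                lambda off: fetch_chunk(off, min(off + chunk_size, end)),
                chunk_offsets(start, end, chunk_size)))
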
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,75 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _str
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-
-
-def _get_path(share_name=None, directory_name=None, file_name=None):
-    '''
-    Creates the path to access a file resource.
-
-    share_name:
-        Name of share.
-    directory_name:
-        The path to the directory.
-    file_name:
-        Name of file.
-    '''
-    if share_name and directory_name and file_name:
-        return '/{0}/{1}/{2}'.format(
-            _str(share_name),
-            _str(directory_name),
-            _str(file_name))
-    elif share_name and directory_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(directory_name))
-    elif share_name and file_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(file_name))
-    elif share_name:
-        return '/{0}'.format(_str(share_name))
-    else:
-        return '/'
-
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
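
For reference, the x-ms-range values built above use the standard HTTP bytes
form with an inclusive end offset, and MD5 validation is only permitted for a
fully specified range of at most 4MB. A minimal sketch of the header
formatting under the same inclusive-end convention:

    def format_range_header(start_range, end_range=None):
        # Inclusive end offset, matching the x-ms-range values built above.
        if end_range is not None:
            return 'bytes={0}-{1}'.format(start_range, end_range)
        return 'bytes={0}-'.format(start_range)

    assert format_range_header(0, 511) == 'bytes=0-511'
    assert format_range_header(1024) == 'bytes=1024-'
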
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,143 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import threading
-
-
-def _upload_file_chunks(file_service, share_name, directory_name, file_name,
-                        file_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, timeout):
-    uploader = _FileChunkUploader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        file_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, file_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
-    else:
-        if file_size is not None:
-            range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
-        else:
-            range_ids = uploader.process_all_unknown_size()
-
-    return range_ids
-
-
-class _FileChunkUploader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name,
-                 file_size, chunk_size, stream, parallel, progress_callback,
-                 validate_content, timeout):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.file_size = file_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock() if parallel else None
-        self.validate_content = validate_content
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = 0
-        if self.file_size is None:
-            # we don't know the size of the stream, so we have no
-            # choice but to seek
-            while True:
-                data = self._read_from_stream(index, 1)
-                if not data:
-                    break
-                yield index
-                index += self.chunk_size
-        else:
-            while index < self.file_size:
-                yield index
-                index += self.chunk_size
-
-    def process_chunk(self, chunk_offset):
-        size = self.chunk_size
-        if self.file_size is not None:
-            size = min(size, self.file_size - chunk_offset)
-        chunk_data = self._read_from_stream(chunk_offset, size)
-        return self._upload_chunk_with_progress(chunk_offset, chunk_data)
-
-    def process_all_unknown_size(self):
-        assert self.stream_lock is None
-        range_ids = []
-        index = 0
-        while True:
-            data = self._read_from_stream(None, self.chunk_size)
-            if data:
-                index += len(data)
-                range_id = self._upload_chunk_with_progress(index, data)
-                range_ids.append(range_id)
-            else:
-                break
-
-        return range_ids
-
-    def _read_from_stream(self, offset, count):
-        if self.stream_lock is not None:
-            with self.stream_lock:
-                self.stream.seek(self.stream_start + offset)
-                data = self.stream.read(count)
-        else:
-            data = self.stream.read(count)
-        return data
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.file_size)
-
-    def _upload_chunk_with_progress(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        self.file_service.update_range(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            self.validate_content,
-            timeout=self.timeout
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end)
-        self._update_progress(len(chunk_data))
-        return range_id
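
Each chunk above turns into one update_range call covering the inclusive range
[chunk_start, chunk_start + len(chunk_data) - 1], with progress reported as
chunks complete. A minimal sketch of the sequential, known-size path
(upload_range is a hypothetical stand-in for FileService.update_range):

    def upload_in_chunks(stream, file_size, chunk_size, upload_range,
                         progress_callback=None):
        # upload_range(data, start, end) writes [start, end] with an inclusive
        # end offset, mirroring _upload_chunk_with_progress above.
        total = 0
        for offset in range(0, file_size, chunk_size):
            data = stream.read(min(chunk_size, file_size - offset))
            upload_range(data, offset, offset + len(data) - 1)
            total += len(data)
            if progress_callback is not None:
                progress_callback(total, file_size)
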
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/fileservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/fileservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/fileservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/fileservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2477 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSharedKeyAuthentication,
-    _StorageSASAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _get_content_md5,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _parse_metadata,
-    _parse_properties,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_STORAGE_MISSING_INFO,
-    _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-    _validate_access_policies,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    FileSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_shares,
-    _convert_xml_to_directories_and_files,
-    _convert_xml_to_ranges,
-    _convert_xml_to_share_stats,
-    _parse_file,
-    _parse_share,
-    _parse_snapshot_share,
-    _parse_directory,
-)
-from ._download_chunking import _download_file_chunks
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import _upload_file_chunks
-from .models import (
-    FileProperties,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class FileService(StorageClient):
-    '''
-    The Server Message Block (SMB) protocol is the preferred file share protocol
-    used on-premises today. The Microsoft Azure File service enables customers to
-    leverage the availability and scalability of Azure's Cloud Infrastructure as
-    a Service (IaaS) SMB without having to rewrite SMB client applications.
-
-    The Azure File service also offers a compelling alternative to traditional
-    Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which
-    are often complex and expensive to install, configure, and operate.
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_file_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        file is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_file_to_* methods if
-        max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the file is smaller than
-        this. If this is set to larger than 4MB, validate_content will throw an
-        error if enabled. However, if validate_content is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar int MAX_RANGE_SIZE:
-        The size of the ranges put by create_file_from_* methods. Smaller ranges
-        may be put if there is less data provided. The maximum range size the service
-        supports is 4MB.
-    '''
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024
-    MAX_RANGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'file',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(FileService, self).__init__(service_params)
-
-        if self.account_name == DEV_ACCOUNT_NAME:
-            raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_file_url(self, share_name, directory_name, file_name,
-                      protocol=None, sas_token=None):
-        '''
-        Creates the URL to access a file.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when FileService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: file access URL.
-        :rtype: str
-        '''
-
-        if directory_name is None:
-            url = '{}://{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                file_name,
-            )
-        else:
-            url = '{}://{}/{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                directory_name,
-                file_name,
-            )
-
-        if sas_token:
-            url += '?' + sas_token
-
-        return url
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the file service.
-        Use the returned signature with the sas_token parameter of the FileService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.FILE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_share_shared_access_signature(self, share_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None,
-                                               protocol=None,
-                                               cache_control=None,
-                                               content_disposition=None,
-                                               content_encoding=None,
-                                               content_language=None,
-                                               content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (SAS) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_share(
-            share_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
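-
-    # Example (an illustrative sketch): generating a read-only share SAS valid
-    # for one hour. `svc` (a configured FileService) and the SharePermissions
-    # model are assumptions, not shown in this file.
-    #
-    #   from datetime import datetime, timedelta
-    #   token = svc.generate_share_shared_access_signature(
-    #       'myshare',
-    #       permission=SharePermissions.READ,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))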
-
-    def generate_file_shared_access_signature(self, share_name,
-                                              directory_name=None,
-                                              file_name=None,
-                                              permission=None,
-                                              expiry=None,
-                                              start=None,
-                                              id=None,
-                                              ip=None,
-                                              protocol=None,
-                                              cache_control=None,
-                                              content_disposition=None,
-                                              content_encoding=None,
-                                              content_language=None,
-                                              content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_file(
-            share_name,
-            directory_name,
-            file_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
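-
-    # Example (an illustrative sketch): a SAS scoped to a single file, forcing
-    # a download filename via the content_disposition response header. `svc`
-    # and FilePermissions are assumptions.
-    #
-    #   from datetime import datetime, timedelta
-    #   token = svc.generate_file_shared_access_signature(
-    #       'myshare', 'mydir', 'myfile.txt',
-    #       permission=FilePermissions.READ,
-    #       expiry=datetime.utcnow() + timedelta(hours=1),
-    #       content_disposition='attachment; filename=myfile.txt')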
-
-    def set_file_service_properties(self, hour_metrics=None, minute_metrics=None,
-                                    cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's File service, including
-        Azure Storage Analytics. If an element (e.g. HourMetrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for files.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for files.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
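-
-    # Example (an illustrative sketch): enabling hourly metrics with a 7-day
-    # retention policy and one permissive CORS rule; Metrics, RetentionPolicy
-    # and CorsRule are assumed to come from the common models module.
-    #
-    #   hour = Metrics(enabled=True, include_apis=True,
-    #                  retention_policy=RetentionPolicy(enabled=True, days=7))
-    #   svc.set_file_service_properties(
-    #       hour_metrics=hour,
-    #       cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])])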
-
-    def get_file_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's File service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The file service properties.
-        :rtype:
-            :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def list_shares(self, prefix=None, marker=None, num_results=None,
-                    include_metadata=False, timeout=None, include_snapshots=False):
-        '''
-        Returns a generator to list the shares under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned or num_results 
-        is reached.
-
-        If num_results is specified and the account has more than that number of 
-        shares, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of shares to return.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param bool include_snapshots:
-            Specifies that share snapshots be returned in the response.
-        '''
-        include = 'snapshots' if include_snapshots else None
-        if include_metadata:
-            if include is not None:
-                include = include + ',metadata'
-            else:
-                include = 'metadata'
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_shares(**kwargs)
-
-        return ListGenerator(resp, self._list_shares, (), kwargs)
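-
-    # Example (an illustrative sketch): the generator lazily follows
-    # continuation tokens; with num_results set, enumeration can be resumed
-    # from next_marker. `svc` is assumed.
-    #
-    #   for share in svc.list_shares(prefix='logs', include_metadata=True):
-    #       print(share.name, share.metadata)
-    #
-    #   first_page = svc.list_shares(num_results=5)
-    #   names = [s.name for s in first_page]
-    #   rest = svc.list_shares(marker=first_page.next_marker)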
-
-    def _list_shares(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the shares under the specified account.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of shares to return. A single list
-            request may return up to 1000 shares and potentially a continuation
-            token which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the share's metadata,
-            snapshots, or both be returned as part of the response body. Set
-            this parameter to 'metadata' to get the share's metadata, to
-            'snapshots' to get the share snapshots, or to 'snapshots,metadata'
-            for both.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_shares, operation_context=_context)
-
-    def create_share(self, share_name, metadata=None, quota=None,
-                     fail_on_exist=False, timeout=None):
-        '''
-        Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of share to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the share exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if share is created, False if share already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
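-
-    # Example (an illustrative sketch): creating a 10 GB share; with the
-    # default fail_on_exist=False an existing share is reported through the
-    # False return value rather than an exception.
-    #
-    #   created = svc.create_share('myshare',
-    #                              metadata={'Category': 'test'}, quota=10)
-    #   if not created:
-    #       print('share already exists')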
-
-    def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None):
-        '''
-        Creates a snapshot of an existing share under the specified account.
-
-        :param str share_name:
-            The name of the share to create a snapshot of.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Snapshot properties.
-        :rtype: :class:`~azure.storage.file.models.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp':  'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_share, [share_name])
-
-    def get_share_properties(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A Share that exposes properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-             'restype': 'share',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_share, [share_name])
-
-    def set_share_properties(self, share_name, quota, timeout=None):
-        '''
-        Sets service-defined properties for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('quota', quota)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-
-        self._perform_request(request)
-
-    def get_share_metadata(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the share metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-             'restype': 'share',
-             'comp': 'metadata',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_share_metadata(self, share_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        share. Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param metadata:
-            A dict containing name-value pairs to associate with the share as 
-            metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def get_share_acl(self, share_name, timeout=None):
-        '''
-        Gets the permissions for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the share.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_share_acl(self, share_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets the permissions for the specified share or stored access 
-        policies that may be used with Shared Access Signatures.
-
-        :param str share_name:
-            Name of existing share.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
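-
-    # Example (an illustrative sketch): storing an access policy on the share
-    # and issuing a SAS that references it by id, so the SAS can later be
-    # revoked by updating the policy. AccessPolicy and SharePermissions are
-    # assumptions.
-    #
-    #   from datetime import datetime, timedelta
-    #   policy = AccessPolicy(permission=SharePermissions.READ,
-    #                         expiry=datetime.utcnow() + timedelta(days=7))
-    #   svc.set_share_acl('myshare', {'readonly': policy})
-    #   token = svc.generate_share_shared_access_signature(
-    #       'myshare', id='readonly')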
-
-    def get_share_stats(self, share_name, timeout=None):
-        '''
-        Gets the approximate size of the data stored on the share,
-        rounded up to the nearest gigabyte.
-        
-        Note that this value may not include all recently created
-        or recently resized files.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the approximate size of the data stored on the share.
-        :rtype: int
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_share_stats)
-
-    def delete_share(self, share_name, fail_not_exist=False, timeout=None, snapshot=None, delete_snapshots=None):
-        '''
-        Marks the specified share for deletion. If the share
-        does not exist, the operation fails on the service. By 
-        default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of share to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the share doesn't
-            exist. False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-            Specify this argument to delete a specific snapshot only.
-            delete_snapshots must be None if this is specified.
-        :param ~azure.storage.file.models.DeleteSnapshot delete_snapshots:
-            To delete a share that has snapshots, this must be specified as DeleteSnapshot.Include.
-        :return: True if share is deleted, False if share doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.headers = {
-            'x-ms-delete-snapshots': _to_str(delete_snapshots)
-        }
-        request.query = {
-             'restype': 'share',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
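-
-    # Example (an illustrative sketch): a share that still has snapshots can
-    # only be deleted together with them; DeleteSnapshot is assumed to come
-    # from the file models module.
-    #
-    #   svc.delete_share('myshare', delete_snapshots=DeleteSnapshot.Include)
-    #   # or delete a single snapshot only:
-    #   svc.delete_share('myshare', snapshot='2017-10-04T12:00:00.0000000Z')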
-
-    def create_directory(self, share_name, directory_name, metadata=None,
-                         fail_on_exist=False, timeout=None):
-        '''
-        Creates a new directory under the specified share or parent directory. 
-        If the directory with the same name already exists, the operation fails
-        on the service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to create, including the path to the parent 
-            directory.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            directory as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the directory exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is created, False if directory already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def delete_directory(self, share_name, directory_name,
-                         fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified empty directory. Note that the directory must
-        be empty before it can be deleted. Attempting to delete directories 
-        that are not empty will fail.
-
-        If the directory does not exist, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to delete, including the path to the parent 
-            directory.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the directory doesn't
-            exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is deleted, False otherwise.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_directory_properties(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-           The path to an existing directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: Properties for the specified directory within a directory object.
-        :rtype: :class:`~azure.storage.file.models.Directory`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-             'restype': 'directory',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_directory, [directory_name])
-
-    def get_directory_metadata(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified directory.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the directory metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-             'restype': 'directory',
-             'comp': 'metadata',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        directory. Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param metadata:
-            A dict containing name-value pairs to associate with the directory
-            as metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def list_directories_and_files(self, share_name, directory_name=None,
-                                   num_results=None, marker=None, timeout=None,
-                                   prefix=None, snapshot=None):
-        '''
-        Returns a generator to list the directories and files under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all directories and files have been returned or
-        num_results is reached.
-
-        If num_results is specified and the share has more than that number of 
-        files and directories, the generator will have a populated next_marker 
-        field once it finishes. This marker can be used to create a new generator 
-        if more results are desired.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int num_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (share_name, directory_name)
-        kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout,
-                  '_context': operation_context, 'prefix': prefix, 'snapshot': snapshot}
-
-        resp = self._list_directories_and_files(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
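-
-    # Example (an illustrative sketch): entries are yielded lazily and include
-    # both files and sub-directories (File/Directory model objects in the
-    # track1 SDK, an assumption here).
-    #
-    #   for entry in svc.list_directories_and_files('myshare', 'mydir'):
-    #       print(entry.name)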
-
-    def _list_directories_and_files(self, share_name, directory_name=None,
-                                    marker=None, max_results=None, timeout=None,
-                                    prefix=None, _context=None, snapshot=None):
-        '''
-        Returns a list of the directories and files under the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-             'restype': 'directory',
-             'comp': 'list',
-             'prefix': _to_str(prefix),
-             'marker': _to_str(marker),
-             'maxresults': _int_to_str(max_results),
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _convert_xml_to_directories_and_files,
-                                     operation_context=_context)
-
-    def get_file_properties(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. Returns an instance of :class:`~azure.storage.file.models.File` with
-        :class:`~azure.storage.file.models.FileProperties` and a metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: a file object including properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = { 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-
-        return self._perform_request(request, _parse_file, [file_name])
-
-    def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
-        '''
-        Returns a boolean indicating whether the share exists if only the share
-        name is given. If directory_name is specified, a boolean will be returned
-        indicating whether the directory exists. If file_name is specified as well,
-        a boolean will be returned indicating whether the file exists.
-
-        :param str share_name:
-            Name of a share.
-        :param str directory_name:
-            The path to a directory.
-        :param str file_name:
-            Name of a file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        try:
-            if file_name is not None:
-                self.get_file_properties(share_name, directory_name, file_name, timeout=timeout, snapshot=snapshot)
-            elif directory_name is not None:
-                self.get_directory_properties(share_name, directory_name, timeout=timeout, snapshot=snapshot)
-            else:
-                self.get_share_properties(share_name, timeout=timeout, snapshot=snapshot)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
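-
-    # Example (an illustrative sketch): the same call checks share, directory
-    # or file existence depending on which names are supplied.
-    #
-    #   svc.exists('myshare')                        # the share
-    #   svc.exists('myshare', 'mydir')               # a directory
-    #   svc.exists('myshare', 'mydir', 'data.csv')   # a file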
-
-    def resize_file(self, share_name, directory_name,
-                    file_name, content_length, timeout=None):
-        '''
-        Resizes a file to the specified size. If the specified byte
-        value is less than the current size of the file, then all
-        ranges above the specified byte value are cleared.
-        
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int content_length:
-            The length to resize the file to.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length)
-        }
-
-        self._perform_request(request)
-
-    def set_file_properties(self, share_name, directory_name, file_name,
-                            content_settings, timeout=None):
-        '''
-        Sets system properties on the file. If one property is set for the
-        content_settings, all properties will be overridden.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set the file properties.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_settings', content_settings)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = content_settings._to_headers()
-
-        self._perform_request(request)
-
-    def get_file_metadata(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the file metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-             'comp': 'metadata',
-             'timeout': _int_to_str(timeout),
-             'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_file_metadata(self, share_name, directory_name,
-                          file_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the file. To remove all
-            metadata from the file, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def copy_file(self, share_name, directory_name, file_name, copy_source,
-                  metadata=None, timeout=None):
-        '''
-        Copies a file asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The File service copies files on a best-effort basis.
-
-        If the destination file exists, it will be overwritten. The destination 
-        file cannot be modified while the copy operation is in progress.
-
-        :param str share_name:
-            Name of the destination share. The share must exist.
-        :param str directory_name:
-            Name of the destination directory. The directory must exist.
-        :param str file_name:
-            Name of the destination file. If the destination file exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.file.core.windows.net/myshare/mydir/myfile
-            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
-        :param metadata:
-            Name-value pairs associated with the file as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination file. If one or more name-value 
-            pairs are specified, the destination file is created with the specified 
-            metadata, and the metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.file.models.CopyProperties`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_source', copy_source)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [FileProperties]).copy
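-
-    # Example (an illustrative sketch): starting a copy and polling until it
-    # leaves the pending state; the `status`/`id` attribute names on the
-    # returned copy properties follow the track1 models and are assumptions.
-    #
-    #   import time
-    #   copy = svc.copy_file(
-    #       'myshare', 'mydir', 'dest.txt',
-    #       'https://otheraccount.file.core.windows.net/share/src.txt?sastoken')
-    #   while copy.status == 'pending':
-    #       time.sleep(5)
-    #       copy = svc.get_file_properties(
-    #           'myshare', 'mydir', 'dest.txt').properties.copy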
-
-    def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
-        '''
-        Aborts a pending copy_file operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param str share_name:
-            Name of destination share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of destination file.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_file operation.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
-    def delete_file(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-
-        self._perform_request(request)
-
-    def create_file(self, share_name, directory_name, file_name,
-                    content_length, content_settings=None, metadata=None,
-                    timeout=None):
-        '''
-        Creates a new file.
-
-        See create_file_from_* for high-level functions that handle the
-        creation and upload of large files with automatic chunking and
-        progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param int content_length:
-            Length of the file in bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length),
-            'x-ms-type': 'file'
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        self._perform_request(request)
-
-    def create_file_from_path(self, share_name, directory_name, file_name,
-                              local_file_path, content_settings=None,
-                              metadata=None, validate_content=False, progress_callback=None,
-                              max_connections=2, timeout=None):
-        '''
-        Creates a new Azure file from a local file path, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str local_file_path:
-            Path of the local file to upload as the file content.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used for setting file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
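-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service`` and a local file
-        named ``report.csv``)::
-
-            service.create_file_from_path('myshare', 'mydir', 'report.csv',
-                                          'report.csv', max_connections=4)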
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('local_file_path', local_file_path)
-
-        count = path.getsize(local_file_path)
-        with open(local_file_path, 'rb') as stream:
-            self.create_file_from_stream(
-                share_name, directory_name, file_name, stream,
-                count, content_settings, metadata, validate_content, progress_callback,
-                max_connections, timeout)
-
-    def create_file_from_text(self, share_name, directory_name, file_name,
-                              text, encoding='utf-8', content_settings=None,
-                              metadata=None, validate_content=False, timeout=None):
-        '''
-        Creates a new file from str/unicode, or updates the content of an
-        existing file, with automatic chunking.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str text:
-            Text to upload to the file.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
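-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            service.create_file_from_text('myshare', 'mydir', 'hello.txt',
-                                          u'hello, world')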
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.create_file_from_bytes(
-            share_name, directory_name, file_name, text, count=len(text),
-            content_settings=content_settings, metadata=metadata,
-            validate_content=validate_content, timeout=timeout)
-
-    def create_file_from_bytes(
-            self, share_name, directory_name, file_name, file,
-            index=0, count=None, content_settings=None, metadata=None,
-            validate_content=False, progress_callback=None, max_connections=2,
-            timeout=None):
-        '''
-        Creates a new file from an array of bytes, or updates the content
-        of an existing file, with automatic chunking and progress
-        notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param bytes file:
-            Content of file as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or a negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
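-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            data = b'\x00' * 1024
-            service.create_file_from_bytes('myshare', 'mydir', 'data.bin', data,
-                                           index=0, count=len(data))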
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file', file)
-        _validate_type_bytes('file', file)
-
-        if index < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(file) - index
-
-        stream = BytesIO(file)
-        stream.seek(index)
-
-        self.create_file_from_stream(
-            share_name, directory_name, file_name, stream, count,
-            content_settings, metadata, validate_content, progress_callback,
-            max_connections, timeout)
-
-    def create_file_from_stream(
-            self, share_name, directory_name, file_name, stream, count,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, timeout=None):
-        '''
-        Creates a new file from a file/stream, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the file content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a
-            file cannot be created if the count is unknown.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
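-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service`` and a local file
-        named ``backup.tar``)::
-
-            import os
-            with open('backup.tar', 'rb') as stream:
-                service.create_file_from_stream('myshare', 'mydir', 'backup.tar',
-                                                stream, os.path.getsize('backup.tar'))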
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-
-        if count < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        self.create_file(
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            content_settings,
-            metadata,
-            timeout
-        )
-
-        _upload_file_chunks(
-            self,
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            self.MAX_RANGE_SIZE,
-            stream,
-            max_connections,
-            progress_callback,
-            validate_content,
-            timeout
-        )
-
-    def _get_file(self, share_name, directory_name, file_name,
-                 start_range=None, end_range=None, validate_content=False,
-                 timeout=None, _context=None, snapshot=None):
-        '''
-        Downloads a file's content, metadata, and properties. You can specify a
-        range if you don't need to download the file in its entirety. If no range
-        is specified, the full file will be downloaded.
-
-        See get_file_to_* for high-level functions that handle the download
-        of large files with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_file,
-                                     [file_name, validate_content],
-                                     operation_context=_context)
-
-    def get_file_to_path(self, share_name, directory_name, file_name, file_path,
-                         open_mode='wb', start_range=None, end_range=None,
-                         validate_content=False, progress_callback=None,
-                         max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a file path, with automatic chunking and progress
-        notifications. Returns an instance of File with properties and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str file_path:
-            Path of file to write to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an append-only
-            open_mode prevents parallel download, so max_connections must be set
-            to 1 if such a mode is used.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
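-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            file = service.get_file_to_path('myshare', 'mydir', 'report.csv',
-                                            'downloaded_report.csv')
-            print(file.properties.content_length)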
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            file = self.get_file_to_stream(
-                share_name, directory_name, file_name, stream,
-                start_range, end_range, validate_content,
-                progress_callback, max_connections, timeout, snapshot)
-
-        return file
-
-    def get_file_to_stream(
-        self, share_name, directory_name, file_name, stream,
-        start_range=None, end_range=None, validate_content=False,
-        progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties
-        and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param io.IOBase stream:
-            Opened file/stream to write to.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
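-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            with open('backup.tar', 'wb') as stream:
-                service.get_file_to_stream('myshare', 'mydir', 'backup.tar',
-                                           stream, max_connections=4)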
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-
-        # If the user explicitly sets max_connections to 1, do a single shot download
-        if max_connections == 1:
-            file = self._get_file(share_name,
-                                  directory_name,
-                                  file_name,
-                                  start_range=start_range,
-                                  end_range=end_range,
-                                  validate_content=validate_content,
-                                  timeout=timeout,
-                                  snapshot=snapshot)
-
-            # Set the download size
-            download_size = file.properties.content_length
-
-        # If max_connections is greater than 1, do the first get to establish the 
-        # size of the file and get the first segment of data
-        else:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-            # The service only provides transactional MD5s for chunks under 4MB.           
-            # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first 
-            # chunk so a transactional MD5 can be retrieved.
-            first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-            initial_request_start = start_range if start_range else 0
-
-            if end_range is not None and end_range - start_range < first_get_size:
-                initial_request_end = end_range
-            else:
-                initial_request_end = initial_request_start + first_get_size - 1
-
-            # Send a context object to make sure we always retry to the initial location
-            operation_context = _OperationContext(location_lock=True)
-            try:
-                file = self._get_file(share_name,
-                                      directory_name,
-                                      file_name,
-                                      start_range=initial_request_start,
-                                      end_range=initial_request_end,
-                                      validate_content=validate_content,
-                                      timeout=timeout,
-                                      _context=operation_context,
-                                      snapshot=snapshot)
-
-                # Parse the total file size and adjust the download size if ranges 
-                # were specified
-                file_size = _parse_length_from_content_range(file.properties.content_range)
-                if end_range:
-                    # Use the end_range unless it is over the end of the file
-                    download_size = min(file_size, end_range - start_range + 1)
-                elif start_range:
-                    download_size = file_size - start_range
-                else:
-                    download_size = file_size
-            except AzureHttpError as ex:
-                if not start_range and ex.status_code == 416:
-                    # Get range will fail on an empty file. If the user did not 
-                    # request a range, do a regular get request in order to get 
-                    # any properties.
-                    file = self._get_file(share_name,
-                                          directory_name,
-                                          file_name,
-                                          validate_content=validate_content,
-                                          timeout=timeout,
-                                          _context=operation_context,
-                                          snapshot=snapshot)
-
-                    # Set the download size to empty
-                    download_size = 0
-                else:
-                    raise ex
-
-        # Mark the first progress chunk. If the file is small or this is a single 
-        # shot download, this is the only call
-        if progress_callback:
-            progress_callback(file.properties.content_length, download_size)
-
-        # Write the content to the user stream  
-        # Clear file content since output has been written to user stream   
-        if file.content is not None:
-            stream.write(file.content)
-            file.content = None
-
-        # If the file is small or single shot download was used, the download is 
-        # complete at this point. If file size is large, use parallel download.
-        if file.properties.content_length != download_size:
-            # At this point would like to lock on something like the etag so that 
-            # if the file is modified, we don't get a corrupted download. However,
-            # this feature is not yet available on the file service.
-
-            end_file = file_size
-            if end_range:
-                # Use the end_range unless it is over the end of the file
-                end_file = min(file_size, end_range + 1)
-
-            _download_file_chunks(
-                self,
-                share_name,
-                directory_name,
-                file_name,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_file,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                timeout,
-                operation_context,
-                snapshot
-            )
-
-            # Set the content length to the download size instead of the size of 
-            # the last range
-            file.properties.content_length = download_size
-
-            # Overwrite the content range to the user requested range
-            file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead 
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            file.properties.content_md5 = None
-
-        return file
-
-    def get_file_to_bytes(self, share_name, directory_name, file_name,
-                          start_range=None, end_range=None, validate_content=False,
-                          progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.file.models.File` with
-        properties, metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
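-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            file = service.get_file_to_bytes('myshare', 'mydir', 'data.bin',
-                                             start_range=0, end_range=511)
-            first_512_bytes = file.content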
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-
-        stream = BytesIO()
-        file = self.get_file_to_stream(
-            share_name,
-            directory_name,
-            file_name,
-            stream,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = stream.getvalue()
-        return file
-
-    def get_file_to_text(
-        self, share_name, directory_name, file_name, encoding='utf-8',
-        start_range=None, end_range=None, validate_content=False,
-        progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties,
-        metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str encoding:
-            Python encoding to use when decoding the file data.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
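-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            file = service.get_file_to_text('myshare', 'mydir', 'hello.txt')
-            print(file.content)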
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('encoding', encoding)
-
-        file = self.get_file_to_bytes(
-            share_name,
-            directory_name,
-            file_name,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = file.content.decode(encoding)
-        return file
-
-    def update_range(self, share_name, directory_name, file_name, data,
-                     start_range, end_range, validate_content=False, timeout=None):
-        '''
-        Writes the bytes specified by the request body into the specified range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bytes data:
-            Content of the range.
-        :param int start_range:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the range content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
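-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service`` and an existing
-        file of at least 512 bytes)::
-
-            service.update_range('myshare', 'mydir', 'data.bin', b'\xff' * 512,
-                                 start_range=0, end_range=511)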
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('data', data)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-write': 'update',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-        request.body = _get_data_bytes_only('data', data)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
-    def clear_range(self, share_name, directory_name, file_name, start_range,
-                    end_range, timeout=None):
-        '''
-        Clears the specified range and releases the space used in storage for 
-        that range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
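-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            service.clear_range('myshare', 'mydir', 'data.bin',
-                                start_range=0, end_range=511)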
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'Content-Length': '0',
-            'x-ms-write': 'clear',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-
-        self._perform_request(request)
-
-    def list_ranges(self, share_name, directory_name, file_name,
-                    start_range=None, end_range=None, timeout=None, snapshot=None):
-        '''
-        Retrieves the valid ranges for a file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Specifies the start offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges covering the first 512 bytes of the file.
-        :param int end_range:
-            Specifies the end offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges covering the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :returns: a list of valid ranges
-        :rtype: a list of :class:`~azure.storage.file.models.FileRange`
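-
-        Example (illustrative sketch, not from the original source; assumes an
-        authenticated FileService instance named ``service``)::
-
-            for file_range in service.list_ranges('myshare', 'mydir', 'data.bin'):
-                print(file_range.start, file_range.end)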
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'rangelist',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False)
-
-        return self._perform_request(request, _convert_xml_to_ranges)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/models.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,416 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Share(object):
-    '''
-    File share class.
-    
-    :ivar str name:
-        The name of the share.
-    :ivar ShareProperties properties:
-        System properties for the share.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the share as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list shares operation. If this parameter was specified but the 
-        share has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None, snapshot=None):
-        self.name = name
-        self.properties = props or ShareProperties()
-        self.metadata = metadata
-        self.snapshot = snapshot
-
-
-class ShareProperties(object):
-    '''
-    File share's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        Returns the current share quota in GB.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.quota = None
-
-
-class Directory(object):
-    '''
-    Directory class.
-    
-    :ivar str name:
-        The name of the directory.
-    :ivar DirectoryProperties properties:
-        System properties for the directory.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the directory as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list directory operation. If this parameter was specified but the 
-        directory has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or DirectoryProperties()
-        self.metadata = metadata
-
-
-class DirectoryProperties(object):
-    '''
-    File directory's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool server_encrypted:
-        Set to true if the directory metadata is encrypted on the server.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.server_encrypted = None
-
-
-class File(object):
-    '''
-    File class.
-    
-    :ivar str name:
-        The name of the file.
-    :ivar content:
-        File content.
-    :vartype content: str or bytes
-    :ivar FileProperties properties:
-        System properties for the file.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the file as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list file operation. If this parameter was specified but the 
-        file has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.content = content
-        self.properties = props or FileProperties()
-        self.metadata = metadata
-
-
-class FileProperties(object):
-    '''
-    File Properties.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire file was requested,
-        the length of the file in bytes. If a subset of the file was requested, the
-        length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client 
-        requested a subset of the file.
-    :ivar ~azure.storage.file.models.ContentSettings content_settings:
-        Stores all the content settings for the file.
-    :ivar ~azure.storage.file.models.CopyProperties copy:
-        Stores all the copy properties for the file.
-    :ivar bool server_encrypted:
-        Set to true if the file data and application metadata are completely encrypted.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.content_settings = ContentSettings()
-        self.copy = CopyProperties()
-        self.server_encrypted = None
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a file.
-
-    :ivar str content_type:
-        The content type specified for the file. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If content_encoding has previously been set
-        for the file, that value is stored.
-    :ivar str content_language:
-        If content_language has previously been set
-        for the file, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :ivar str cache_control:
-        If cache_control has previously been set for
-        the file, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-cache-control': _to_str(self.cache_control),
-            'x-ms-content-type': _to_str(self.content_type),
-            'x-ms-content-disposition': _to_str(self.content_disposition),
-            'x-ms-content-md5': _to_str(self.content_md5),
-            'x-ms-content-encoding': _to_str(self.content_encoding),
-            'x-ms-content-language': _to_str(self.content_language),
-        }
-
-
-class CopyProperties(object):
-    '''
-    File Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy File operation where this file
-        was the destination file. This header does not appear if this file has never
-        been the destination in a Copy File operation, or if this file has been
-        modified after a concluded Copy File operation using Set File Properties or
-        Put File.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source file used in the last attempted
-        Copy File operation where this file was the destination file. This header does not
-        appear if this file has never been the destination in a Copy File operation, or if
-        this file has been modified after a concluded Copy File operation using
-        Set File Properties or Put File.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending: 
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy File.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy File operation where this file was the destination file. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy File operation where this file was the
-        destination file. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure. 
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class FileRange(object):
-    '''
-    File Range.
-    
-    :ivar int start:
-        Byte index for start of file range.
-    :ivar int end:
-        Byte index for end of file range.
-    '''
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the Share has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the share and all of its snapshots.
-    '''
-
-
-class FilePermissions(object):
-    '''
-    FilePermissions class to be used with 
-    :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API.
-
-    :ivar FilePermissions FilePermissions.CREATE:
-        Create a new file or copy a file to a new file.
-    :ivar FilePermissions FilePermissions.DELETE: 
-        Delete the file.
-    :ivar FilePermissions FilePermissions.READ:
-        Read the content, properties, metadata. Use the file as the source of a copy 
-        operation.
-    :ivar FilePermissions FilePermissions.WRITE: 
-        Create or write content, properties, metadata. Resize the file. Use the file 
-        as the destination of a copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, create=False, write=False, delete=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata. Use the file as the source of a copy 
-            operation.
-        :param bool create:
-            Create a new file or copy a file to a new file.
-        :param bool write: 
-            Create or write content, properties, metadata. Resize the file. Use the file 
-            as the destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the file.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-FilePermissions.CREATE = FilePermissions(create=True)
-FilePermissions.DELETE = FilePermissions(delete=True)
-FilePermissions.READ = FilePermissions(read=True)
-FilePermissions.WRITE = FilePermissions(write=True)
-
-
-class SharePermissions(object):
-    '''
-    SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
-    method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. 
-
-    :ivar SharePermissions SharePermissions.DELETE:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use 
-        an account SAS instead.
-    :ivar SharePermissions SharePermissions.LIST:
-        List files and directories in the share.
-    :ivar SharePermissions SharePermissions.READ:
-        Read the content, properties or metadata of any file in the share. Use any 
-        file in the share as the source of a copy operation.
-    :ivar SharePermissions SharePermissions.WRITE:
-        For any file in the share, create or write content, properties or metadata. 
-        Resize the file. Use the file as the destination of a copy operation within 
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or 
-        metadata with a service SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties or metadata of any file in the share. Use any 
-            file in the share as the source of a copy operation.
-        :param bool write: 
-            For any file in the share, create or write content, properties or metadata. 
-            Resize the file. Use the file as the destination of a copy operation within 
-            the same account.
-            Note: You cannot grant permissions to read or write share properties or 
-            metadata with a service SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any file in the share.
-            Note: You cannot grant permissions to delete a share with a service SAS. Use 
-            an account SAS instead.
-        :param bool list: 
-            List files and directories in the share.
-        :param str _str: 
-            A string representing the permissions
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-SharePermissions.DELETE = SharePermissions(delete=True)
-SharePermissions.LIST = SharePermissions(list=True)
-SharePermissions.READ = SharePermissions(read=True)
-SharePermissions.WRITE = SharePermissions(write=True)
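For reference, both permission classes above reduce to the canonical single-letter string the service expects, and instances compose via `__or__`/`__add__`, which simply concatenate those strings. A minimal sketch of the round-trip, runnable only against the 1.4.0 tree that still ships this module:

```python
# Sketch against the removed track1 module (resolves only in <= 1.4.0).
from azure.multiapi.storage.v2017_04_17.file.models import (
    FilePermissions,
    SharePermissions,
)

# Composition concatenates the string forms: 'r' + 'w' -> 'rw'.
perms = FilePermissions.READ | FilePermissions.WRITE
assert str(perms) == 'rw'

# The private _str parameter parses the string form back into flags.
round_trip = FilePermissions(_str=str(perms))
assert round_trip.read and round_trip.write and not round_trip.delete

# SharePermissions behaves the same way but adds 'l' for list.
assert str(SharePermissions(read=True, list=True)) == 'rl'
```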
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/file/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/file/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,197 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ..common._common_conversion import (
-    _to_str,
-)
-from ._constants import X_MS_VERSION
-
-
-class FileSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating file and share access
-    signature tokens with a common account name and account key. Users can either
-    use the factory or construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_file(self, share_name, directory_name=None, file_name=None,
-                      permission=None, expiry=None, start=None, id=None,
-                      ip=None, protocol=None, cache_control=None,
-                      content_disposition=None, content_encoding=None,
-                      content_language=None, content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = share_name
-        if directory_name is not None:
-            resource_path += '/' + _to_str(directory_name)
-        resource_path += '/' + _to_str(file_name)
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('f')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
-
-        return sas.get_token()
-
-    def generate_share(self, share_name, permission=None, expiry=None,
-                       start=None, id=None, ip=None, protocol=None,
-                       cache_control=None, content_disposition=None,
-                       content_encoding=None, content_language=None,
-                       content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('s')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
-
-        return sas.get_token()
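The deleted factory was typically driven as below; a sketch against the 1.4.0 tree with placeholder credentials (account name, key, and paths are illustrative only):

```python
from datetime import datetime, timedelta

from azure.multiapi.storage.v2017_04_17.file.models import FilePermissions
from azure.multiapi.storage.v2017_04_17.file.sharedaccesssignature import (
    FileSharedAccessSignature,
)

# Placeholder credentials; real values come from the storage account.
sas_factory = FileSharedAccessSignature('myaccount', 'base64-account-key')

# Token granting read access to a single file for one hour.
token = sas_factory.generate_file(
    share_name='myshare',
    directory_name='docs',
    file_name='report.txt',
    permission=FilePermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1),
)
# The token is a query string; append it to the file URL to authenticate.
```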
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from .models import (
-    Queue,
-    QueueMessage,
-    QueuePermissions,
-    QueueMessageFormat,
-)
-
-from .queueservice import QueueService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '0.36.0'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-04-17'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    Queue,
-    QueueMessage,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _int_to_str,
-    _parse_metadata,
-)
-from ._encryption import (
-    _decrypt_queue_message,
-)
-
-
-def _parse_metadata_and_message_count(response):
-    '''
-    Extracts approximate messages count header.
-    '''
-    metadata = _parse_metadata(response)
-    metadata.approximate_message_count = _int_to_str(response.headers.get('x-ms-approximate-messages-count'))
-
-    return metadata
-
-
-def _parse_queue_message_from_headers(response):
-    '''
-    Extracts pop receipt and time next visible from headers.
-    '''
-    message = QueueMessage()
-    message.pop_receipt = response.headers.get('x-ms-popreceipt')
-    message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible'))
-
-    return message
-
-
-def _convert_xml_to_queues(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Queues>
-        <Queue>
-          <Name>string-value</Name>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Queue>
-      </Queues>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    queues = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(queues, 'next_marker', next_marker)
-
-    queues_element = list_element.find('Queues')
-
-    for queue_element in queues_element.findall('Queue'):
-        # Name element
-        queue = Queue()
-        queue.name = queue_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = queue_element.find('Metadata')
-        if metadata_root_element is not None:
-            queue.metadata = dict()
-            for metadata_element in metadata_root_element:
-                queue.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add queue to list
-        queues.append(queue)
-
-    return queues
-
-
-def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver,
-                                   content=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessagesList>
-        <QueueMessage>
-          <MessageId>string-message-id</MessageId>
-          <InsertionTime>insertion-time</InsertionTime>
-          <ExpirationTime>expiration-time</ExpirationTime>
-          <PopReceipt>opaque-string-receipt-data</PopReceipt>
-          <TimeNextVisible>time-next-visible</TimeNextVisible>
-          <DequeueCount>integer</DequeueCount>
-          <MessageText>message-body</MessageText>
-        </QueueMessage>
-    </QueueMessagesList>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    messages = list()
-    list_element = ETree.fromstring(response.body)
-
-    for message_element in list_element.findall('QueueMessage'):
-        message = QueueMessage()
-
-        message.id = message_element.findtext('MessageId')
-
-        dequeue_count = message_element.findtext('DequeueCount')
-        if dequeue_count is not None:
-            message.dequeue_count = _int_to_str(dequeue_count)
-
-        # content is not returned for put_message
-        if content is not None:
-            message.content = content
-        else:
-            message.content = message_element.findtext('MessageText')
-            if (key_encryption_key is not None) or (resolver is not None):
-                message.content = _decrypt_queue_message(message.content, require_encryption,
-                                                         key_encryption_key, resolver)
-            message.content = decode_function(message.content)
-
-        message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
-        message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
-
-        message.pop_receipt = message_element.findtext('PopReceipt')
-
-        time_next_visible = message_element.find('TimeNextVisible')
-        if time_next_visible is not None:
-            message.time_next_visible = parser.parse(time_next_visible.text)
-
-        # Add message to list
-        messages.append(message)
-
-    return messages
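Because these parsers only read a body attribute (plus headers for the metadata helpers), they can be exercised in isolation; a sketch in which the hypothetical _FakeResponse stands in for the SDK's internal HTTP response type:

```python
from azure.multiapi.storage.v2017_04_17.queue._deserialization import (
    _convert_xml_to_queues,
)


class _FakeResponse(object):
    # Hypothetical stand-in for the SDK's internal HTTP response object.
    def __init__(self, body):
        self.body = body
        self.headers = {}


body = b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
  <Queues>
    <Queue>
      <Name>jobs</Name>
      <Metadata><owner>ops</owner></Metadata>
    </Queue>
  </Queues>
  <NextMarker />
</EnumerationResults>'''

queues = _convert_xml_to_queues(_FakeResponse(body))
assert queues[0].name == 'jobs'
assert queues[0].metadata == {'owner': 'ops'}
assert queues.next_marker is None  # empty <NextMarker /> maps to None
```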
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,168 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-import os
-from json import (
-    dumps,
-    loads,
-)
-
-from azure.common import (
-    AzureException,
-)
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes
-)
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _dict_to_encryption_data,
-    _generate_AES_CBC_cipher,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-)
-from ._error import (
-    _ERROR_MESSAGE_NOT_ENCRYPTED
-)
-
-
-def _encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should 
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted and so was not encrypted
-        # or the user provided a json formatted message.
-        if require_encryption:
-            raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED)
-        else:
-            return message
-    try:
-        return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception:
-        raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-
-def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
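The scheme above is straightforward envelope encryption: a fresh 256-bit content-encryption key (CEK) encrypts the message with AES-CBC and PKCS7 padding, and only the CEK is wrapped with the user's key-encryption key. A self-contained sketch of the symmetric half using the same `cryptography` primitives (key wrapping and the JSON envelope omitted):

```python
import os

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7


def encrypt_message(plaintext, cek, iv):
    # AES-256-CBC; PKCS7 pads to the 16-byte (128-bit) AES block size.
    padder = PKCS7(128).padder()
    padded = padder.update(plaintext.encode('utf-8')) + padder.finalize()
    encryptor = Cipher(algorithms.AES(cek), modes.CBC(iv)).encryptor()
    return encryptor.update(padded) + encryptor.finalize()


def decrypt_message(ciphertext, cek, iv):
    decryptor = Cipher(algorithms.AES(cek), modes.CBC(iv)).decryptor()
    padded = decryptor.update(ciphertext) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    return (unpadder.update(padded) + unpadder.finalize()).decode('utf-8')


cek = os.urandom(32)  # content-encryption key, as in _encrypt_queue_message
iv = os.urandom(16)   # one AES block of random IV
assert decrypt_message(encrypt_message('hello queue', cek, iv), cek, iv) == 'hello queue'
```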
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_error.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,36 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-from ..common._error import (
-    _validate_type_bytes,
-)
-
-_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
-_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
-_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
-_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.'
-
-def _validate_message_type_text(param):
-    if sys.version_info < (3,):
-        if not isinstance(param, unicode):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)
-    else:
-        if not isinstance(param, str):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
-
-
-def _validate_message_type_bytes(param):
-    _validate_type_bytes('message', param)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,82 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    try:
-        from cStringIO import StringIO as BytesIO
-    except ImportError:
-        from StringIO import StringIO as BytesIO
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ..common._common_conversion import (
-    _str,
-)
-from ._encryption import (
-    _encrypt_queue_message,
-)
-
-
-def _get_path(queue_name=None, include_messages=None, message_id=None):
-    '''
-    Creates the path to access a queue resource.
-
-    queue_name:
-        Name of queue.
-    include_messages:
-        Whether or not to include messages.
-    message_id:
-        Message id.
-    '''
-    if queue_name and include_messages and message_id:
-        return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
-    if queue_name and include_messages:
-        return '/{0}/messages'.format(_str(queue_name))
-    elif queue_name:
-        return '/{0}'.format(_str(queue_name))
-    else:
-        return '/'
-
-
-def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessage>
-        <MessageText></MessageText>
-    </QueueMessage>
-    '''
-    queue_message_element = ETree.Element('QueueMessage')
-
-    # Enabled
-    message_text = encode_function(message_text)
-    if key_encryption_key is not None:
-        message_text = _encrypt_queue_message(message_text, key_encryption_key)
-    ETree.SubElement(queue_message_element, 'MessageText').text = message_text
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
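With the default XML encoder and encryption disabled, the serializer emits the minimal QueueMessage document; a sketch against the 1.4.0 tree:

```python
from azure.multiapi.storage.v2017_04_17.queue._serialization import (
    _convert_queue_message_xml,
    _get_path,
)
from azure.multiapi.storage.v2017_04_17.queue.models import QueueMessageFormat

body = _convert_queue_message_xml(
    'hello queue',                      # plain text payload
    QueueMessageFormat.text_xmlencode,  # the service's default encoder
    None,                               # no key-encryption-key: no encryption
)
assert b'<MessageText>hello queue</MessageText>' in body

# Path construction for the corresponding put_message request:
assert _get_path('jobs', include_messages=True) == '/jobs/messages'
```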
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/models.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,248 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from base64 import (
-    b64encode,
-    b64decode,
-)
-from xml.sax.saxutils import escape as xml_escape
-from xml.sax.saxutils import unescape as xml_unescape
-
-from ._error import (
-    _validate_message_type_bytes,
-    _validate_message_type_text,
-    _ERROR_MESSAGE_NOT_BASE64,
-)
-
-
-class Queue(object):
-    '''
-    Queue class.
-     
-    :ivar str name: 
-        The name of the queue.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the queue as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list queues operation. If this parameter was specified but the 
-        queue has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self):
-        self.name = None
-        self.metadata = None
-
-
-class QueueMessage(object):
-    ''' 
-    Queue message class. 
-
-    :ivar str id: 
-        A GUID value assigned to the message by the Queue service that 
-        identifies the message in the queue. This value may be used together 
-        with the value of pop_receipt to delete a message from the queue after 
-        it has been retrieved with the get messages operation. 
-    :ivar date insertion_time: 
-        A UTC date value representing the time the message was inserted.
-    :ivar date expiration_time: 
-        A UTC date value representing the time the message expires.
-    :ivar int dequeue_count: 
-        Begins with a value of 1 the first time the message is dequeued. This 
-        value is incremented each time the message is subsequently dequeued.
-    :ivar obj content: 
-        The message content. Type is determined by the decode_function set on 
-        the service. Default is str.
-    :ivar str pop_receipt: 
-        A receipt str which can be used together with the message_id element to 
-        delete a message from the queue after it has been retrieved with the get 
-        messages operation. Only returned by get messages operations. Set to 
-        None for peek messages.
-    :ivar date time_next_visible: 
-        A UTC date value representing the time the message will next be visible. 
-        Only returned by get messages operations. Set to None for peek messages.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.insertion_time = None
-        self.expiration_time = None
-        self.dequeue_count = None
-        self.content = None
-        self.pop_receipt = None
-        self.time_next_visible = None
-
-
-class QueueMessageFormat:
-    ''' 
-    Encoding and decoding methods which can be used to modify how the queue service 
-    encodes and decodes queue messages. Set these to queueservice.encode_function 
-    and queueservice.decode_function to modify the behavior. The defaults are 
-    text_xmlencode and text_xmldecode, respectively.
-    '''
-
-    @staticmethod
-    def text_base64encode(data):
-        '''
-        Base64 encode unicode text.
-        
-        :param str data: String to encode.
-        :return: Base64 encoded string.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return b64encode(data.encode('utf-8')).decode('utf-8')
-
-    @staticmethod
-    def text_base64decode(data):
-        '''
-        Base64 decode to unicode text.
-        
-        :param str data: String data to decode to unicode.
-        :return: Base64 decoded string.
-        :rtype: str
-        '''
-        try:
-            return b64decode(data.encode('utf-8')).decode('utf-8')
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def binary_base64encode(data):
-        '''
-        Base64 encode byte strings.
-        
-        :param str data: Binary string to encode.
-        :return: Base64 encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_bytes(data)
-        return b64encode(data).decode('utf-8')
-
-    @staticmethod
-    def binary_base64decode(data):
-        '''
-        Base64 decode to byte string.
-        
-        :param str data: Data to decode to a byte string.
-        :return: Base64 decoded data.
-        :rtype: str
-        '''
-        try:
-            return b64decode(data.encode('utf-8'))
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def text_xmlencode(data):
-        ''' 
-        XML encode unicode text.
-
-        :param str data: Unicode string to encode
-        :return: XML encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return xml_escape(data)
-
-    @staticmethod
-    def text_xmldecode(data):
-        ''' 
-        XML decode to unicode text.
-
-        :param str data: Data to decode to unicode.
-        :return: XML decoded data.
-        :rtype: str
-        '''
-        return xml_unescape(data)
-
-    @staticmethod
-    def noencode(data):
-        ''' 
-        Do no encoding. 
-
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str
-        '''
-        return data
-
-    @staticmethod
-    def nodecode(data):
-        '''
-        Do no decoding.
-        
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str        
-        '''
-        return data
-
-
-class QueuePermissions(object):
-    '''
-    QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature`
-    method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. 
-
-    :ivar QueuePermissions QueuePermissions.READ: 
-        Read metadata and properties, including message count. Peek at messages. 
-    :ivar QueuePermissions QueuePermissions.ADD: 
-        Add messages to the queue.
-    :ivar QueuePermissions QueuePermissions.UPDATE:
-        Update messages in the queue. Note: Use the Process permission with 
-        Update so you can first get the message you want to update.
-    :ivar QueuePermissions QueuePermissions.PROCESS:
-        Get and delete messages from the queue.
-    '''
-
-    def __init__(self, read=False, add=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Read metadata and properties, including message count. Peek at messages.
-        :param bool add:
-            Add messages to the queue.
-        :param bool update:
-            Update messages in the queue. Note: Use the Process permission with 
-            Update so you can first get the message you want to update.
-        :param bool process: 
-            Get and delete messages from the queue.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-QueuePermissions.READ = QueuePermissions(read=True)
-QueuePermissions.ADD = QueuePermissions(add=True)
-QueuePermissions.UPDATE = QueuePermissions(update=True)
-QueuePermissions.PROCESS = QueuePermissions(process=True)
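A short sketch of the encoder pairs and permission composition defined above (runnable against the 1.4.0 tree):

```python
from azure.multiapi.storage.v2017_04_17.queue.models import (
    QueueMessageFormat,
    QueuePermissions,
)

# The base64 pair round-trips unicode text through a wire-safe form.
encoded = QueueMessageFormat.text_base64encode(u'hello')
assert encoded == 'aGVsbG8='
assert QueueMessageFormat.text_base64decode(encoded) == u'hello'

# Permissions always serialize in the canonical 'raup' order.
perms = QueuePermissions.READ | QueuePermissions.PROCESS
assert str(perms) == 'rp'
```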
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/queueservice.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/queueservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/queueservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/queueservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,996 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-from azure.common import (
-    AzureConflictHttpError,
-    AzureHttpError,
-)
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _convert_xml_to_service_stats,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_CONFLICT,
-    _ERROR_STORAGE_MISSING_INFO,
-    _validate_access_policies,
-    _validate_encryption_required,
-    _validate_decryption_required,
-)
-from ..common._http import (
-    HTTPRequest,
-)
-from ..common._serialization import (
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from ..common._serialization import (
-    _get_request_body,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    QueueSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_queues,
-    _convert_xml_to_queue_messages,
-    _parse_queue_message_from_headers,
-    _parse_metadata_and_message_count,
-)
-from ._serialization import (
-    _convert_queue_message_xml,
-    _get_path,
-)
-from .models import (
-    QueueMessageFormat,
-)
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-_HTTP_RESPONSE_NO_CONTENT = 204
-
-
-class QueueService(StorageClient):
-    '''
-    This is the main class managing queue resources.
-
-    The Queue service stores messages. A queue can contain an unlimited number of 
-    messages, each of which can be up to 64KB in size. Messages are generally added 
-    to the end of the queue and retrieved from the front of the queue, although 
-    first in, first out (FIFO) behavior is not guaranteed.
-
-    :ivar function(data) encode_function: 
-        A function used to encode queue messages. Takes as 
-        a parameter the data passed to the put_message API and returns the encoded 
-        message. Defaults to text and xml encode, but bytes and other
-        encodings can be used. For example, base64 may be preferable for developing 
-        across multiple Azure Storage libraries in different languages. See the 
-        :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and 
-        no encoding methods as well as binary equivalents.
-    :ivar function(data) decode_function: 
-        A function used to decode queue messages. Takes as
-        a parameter the data returned by the get_messages and peek_messages APIs and
-        returns the decoded message. Defaults to text and xml decode, but
-        bytes and other decodings can be used. For example, base64 may be preferable 
-        for developing across multiple Azure Storage libraries in different languages. 
-        See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 
-        and no decoding methods as well as binary equivalents.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and
-        successfully read from the queue are/were encrypted while on the server. If this flag is set, all required 
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'queue',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(QueueService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        self.encode_function = QueueMessageFormat.text_xmlencode
-        self.decode_function = QueueMessageFormat.text_xmldecode
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self.require_encryption = False
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
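Construction resolves credentials in the order implemented above: shared key when an account key is present, otherwise SAS. A sketch with placeholder credentials (against the 1.4.0 tree):

```python
from azure.multiapi.storage.v2017_04_17.queue import QueueService

# Account key present: _StorageSharedKeyAuthentication signs each request.
service = QueueService(account_name='myaccount', account_key='base64-key')

# Token only: _StorageSASAuthentication appends the SAS query string instead.
sas_service = QueueService(account_name='myaccount',
                           sas_token='sv=2017-04-17&sig=placeholder')
```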
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue service.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.QUEUE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_queue_shared_access_signature(self, queue_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            The name of the queue to create a SAS token for.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_queue_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_queue(
-            queue_name,
-            permission=permission,
-            expiry=expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-        )
-
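-    # A minimal sketch of combining the two SAS helpers above (account values
-    # are placeholders; QueuePermissions is assumed to come from .models).
-    # generate_account_shared_access_signature works the same way, taking
-    # ResourceTypes/AccountPermissions instead of a queue name:
-    #
-    #   from datetime import datetime, timedelta
-    #   service = QueueService(account_name='myaccount', account_key='<key>')
-    #   token = service.generate_queue_shared_access_signature(
-    #       'taskqueue',
-    #       permission=QueuePermissions.READ | QueuePermissions.PROCESS,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))
-    #   # A client holding only the token can read and process messages:
-    #   sas_service = QueueService(account_name='myaccount', sas_token=token)
-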
-    def get_queue_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Queue service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durably
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create the account, for example, North Central US. The location to which
-        your data is replicated is the secondary location, which is automatically
-        determined based on the location of the primary; it resides in a second
-        data center in the same region as the primary location. Read-only access
-        is available from the secondary location if read-access geo-redundant
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The queue service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
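-    # A minimal sketch of inspecting the returned stats; geo_replication is
-    # assumed to be the GeoReplication model from ..common.models:
-    #
-    #   stats = service.get_queue_service_stats()
-    #   if stats.geo_replication.status == 'live':
-    #       print(stats.geo_replication.last_sync_time)
-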
-    def get_queue_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Queue service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The queue service properties.
-        :rtype: :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def set_queue_service_properties(self, logging=None, hour_metrics=None,
-                                     minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for queues.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for queues.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-        self._perform_request(request)
-
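-    # A minimal sketch of enabling hourly metrics with a retention policy,
-    # assuming the Metrics and RetentionPolicy models from ..common.models:
-    #
-    #   metrics = Metrics(enabled=True, include_apis=True,
-    #                     retention_policy=RetentionPolicy(enabled=True, days=5))
-    #   service.set_queue_service_properties(hour_metrics=metrics)
-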
-    def list_queues(self, prefix=None, num_results=None, include_metadata=False,
-                    marker=None, timeout=None):
-        '''
-        Returns a generator to list the queues. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all queues 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        queues, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param int num_results:
-            The maximum number of queues to return.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include,
-                  'marker': marker, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_queues(**kwargs)
-
-        return ListGenerator(resp, self._list_queues, (), kwargs)
-
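-    # A minimal sketch of the generator behavior described above:
-    #
-    #   generator = service.list_queues(prefix='task', num_results=5)
-    #   for queue in generator:
-    #       print(queue.name)
-    #   if generator.next_marker:    # more matches remain
-    #       resumed = service.list_queues(prefix='task', num_results=5,
-    #                                     marker=generator.next_marker)
-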
-    def _list_queues(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of queues under the specified account. Makes a single list 
-        request to the service. Used internally by the list_queues method.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param str marker:
-            A token which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            queues. The marker value is opaque to the client.
-        :param int max_results:
-            The maximum number of queues to return. A single list request may 
-            return up to 1000 queues and potentially a continuation token which 
-            should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the queue's
-            metadata be returned as part of the response body.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queues, operation_context=_context)
-
-    def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a queue under the given account.
-
-        :param str queue_name:
-            The name of the queue to create. A queue name must be from 3 through 
-            63 characters long and may only contain lowercase letters, numbers, 
-            and the dash (-) character. The first and last letters in the queue
-            name must be alphanumeric. The dash (-) character cannot be the first or
-            last character. Consecutive dash characters are not permitted in the 
-            queue name.
-        :param metadata:
-            A dict containing name-value pairs to associate with the queue as 
-            metadata. Note that metadata names preserve the case with which they 
-            were created, but are case-insensitive when set or read. 
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the queue already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        _add_metadata_headers(metadata, request)
-
-        def _return_request(request):
-            return request
-
-        if not fail_on_exist:
-            try:
-                response = self._perform_request(request, parser=_return_request)
-                if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                    return False
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            response = self._perform_request(request, parser=_return_request)
-            if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                raise AzureConflictHttpError(
-                    _ERROR_CONFLICT.format(response.message), response.status)
-            return True
-
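-    # A minimal sketch of idempotent creation using the flag described above:
-    #
-    #   created = service.create_queue('taskqueue', metadata={'dept': 'ops'})
-    #   # created is False when the queue already exists; pass
-    #   # fail_on_exist=True to raise AzureConflictHttpError instead.
-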
-    def delete_queue(self, queue_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The queue is later removed from 
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the queue while it is being deleted,
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str queue_name:
-            The name of the queue to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the queue doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_queue_metadata(self, queue_name, timeout=None):
-        '''
-        Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A dictionary representing the queue metadata, with an additional
-            approximate_message_count int attribute estimating the number of
-            messages in the queue.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _parse_metadata_and_message_count)
-
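-    # A minimal sketch of reading the returned dict and its count estimate:
-    #
-    #   metadata = service.get_queue_metadata('taskqueue')
-    #   print(metadata.approximate_message_count)   # int estimate
-    #   print(metadata.get('dept'))                 # user-defined name-value pair
-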
-    def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param dict metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def exists(self, queue_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the queue exists.
-
-        :param str queue_name:
-            The name of queue to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the queue exists.
-        :rtype: bool
-        '''
-        try:
-            self.get_queue_metadata(queue_name, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def get_queue_acl(self, queue_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the queue that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a queue, the existing permissions are replaced. 
-        To update the queue's permissions, call :func:`~get_queue_acl` to fetch 
-        all access policies associated with the queue, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the queue. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the queue.
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-        self._perform_request(request)
-
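-    # A minimal sketch of the read-modify-write flow described above, with
-    # AccessPolicy assumed to come from ..common.models:
-    #
-    #   from datetime import datetime, timedelta
-    #   policies = service.get_queue_acl('taskqueue')
-    #   policies['worker'] = AccessPolicy(
-    #       permission=QueuePermissions.PROCESS,
-    #       expiry=datetime.utcnow() + timedelta(days=7))
-    #   service.set_queue_acl('taskqueue', signed_identifiers=policies)
-    #   # SAS tokens may now reference the stored policy via id='worker'.
-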
-    def put_message(self, queue_name, content, visibility_timeout=None,
-                    time_to_live=None, timeout=None):
-        '''
-        Adds a new message to the back of the message queue. 
-
-        The visibility timeout specifies the time that the message will be 
-        invisible. After the timeout expires, the message will become visible. 
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the 
-        queue. The message will be deleted from the queue when the time-to-live 
-        period expires.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue to put the message into.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str. The encoded message can be up to 
-            64KB in size.
-        :param int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :param int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The maximum time-to-live allowed is 7 days. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.models.QueueMessage` object.
-            This object is also populated with the content although it is not
-            returned from the service.
-        :rtype: :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('content', content)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'messagettl': _to_str(time_to_live),
-            'timeout': _int_to_str(timeout)
-        }
-
-        request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                    self.key_encryption_key))
-
-        message_list = self._perform_request(request, _convert_xml_to_queue_messages,
-                                             [self.decode_function, False,
-                                              None, None, content])
-        return message_list[0]
-
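-    # A minimal sketch: swapping the default XML-safe text codec for base64
-    # (QueueMessageFormat is assumed to come from .models) allows bytes content:
-    #
-    #   service.encode_function = QueueMessageFormat.binary_base64encode
-    #   service.decode_function = QueueMessageFormat.binary_base64decode
-    #   msg = service.put_message('taskqueue', b'\x00payload', time_to_live=3600)
-    #   print(msg.id, msg.pop_receipt)
-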
-    def get_messages(self, queue_name, num_messages=None,
-                     visibility_timeout=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message 
-        content and a pop_receipt value, which is required to delete the message. 
-        The message is not automatically deleted from the queue, but after it has 
-        been retrieved, it is not visible to other clients for the time interval 
-        specified by the visibility_timeout parameter.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to get messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds, relative
-            to server time. The new value must be larger than or equal to 1
-            second, and cannot be larger than 7 days. The visibility timeout of 
-            a message can be set to a value later than the expiry time.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects retrieved from the queue.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'numofmessages': _to_str(num_messages),
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def peek_messages(self, queue_name, num_messages=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved 
-        for the first time with a call to get_messages, its dequeue_count property 
-        is set to 1. If it is not deleted and is subsequently retrieved again, the 
-        dequeue_count property is incremented. The client may use this value to 
-        determine how many times a message has been retrieved. Note that a call 
-        to peek_messages does not increment the value of dequeue_count, but
-        returns this value for the client to read.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to peek messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that 
-            time_next_visible and pop_receipt will not be populated as peek does 
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'peekonly': 'true',
-            'numofmessages': _to_str(num_messages),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
-        '''
-        Deletes the specified message.
-
-        Normally after a client retrieves a message with the get_messages operation, 
-        the client is expected to process and delete the message. To delete the 
-        message, you must have two items of data: id and pop_receipt. The 
-        id is returned from the previous get_messages operation. The 
-        pop_receipt is returned from the most recent :func:`~get_messages` or 
-        :func:`~update_message` operation. In order for the delete_message operation 
-        to succeed, the pop_receipt specified on the request must match the 
-        pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` 
-        operation. 
-
-        :param str queue_name:
-            The name of the queue from which to delete the message.
-        :param str message_id:
-            The message id identifying the message to delete.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message`.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'timeout': _int_to_str(timeout)
-        }
-        self._perform_request(request)
-
-    def clear_messages(self, queue_name, timeout=None):
-        '''
-        Deletes all messages from the specified queue.
-
-        :param str queue_name:
-            The name of the queue whose messages to clear.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {'timeout': _int_to_str(timeout)}
-        self._perform_request(request)
-
-    def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout,
-                       content=None, timeout=None):
-        '''
-        Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a 
-        queue message. This functionality can be useful if you want a worker role 
-        to "lease" a queue message. For example, if a worker role calls get_messages 
-        and recognizes that it needs more time to process a message, it can 
-        continually extend the message's invisibility until it is processed. If 
-        the worker role were to fail during processing, eventually the message 
-        would become visible again and another worker role could process it.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue containing the message to update.
-        :param str message_id:
-            The message id identifying the message to update.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.models.QueueMessage` object. For convenience,
-            this object is also populated with the content, although it is not returned by the service.
-        :rtype: :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        _validate_not_none('visibility_timeout', visibility_timeout)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'visibilitytimeout': _int_to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        if content is not None:
-            request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                        self.key_encryption_key))
-
-        return self._perform_request(request, _parse_queue_message_from_headers)
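-
-    # A minimal sketch of the "lease" pattern described above; process() is a
-    # placeholder for application work:
-    #
-    #   messages = service.get_messages('taskqueue', visibility_timeout=30)
-    #   for message in messages:
-    #       # Extend invisibility with the most recent pop_receipt if more
-    #       # time is needed, then delete once processing succeeds.
-    #       message = service.update_message('taskqueue', message.id,
-    #                                        message.pop_receipt,
-    #                                        visibility_timeout=60)
-    #       process(message)
-    #       service.delete_message('taskqueue', message.id, message.pop_receipt)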
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_04_17/queue/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_04_17/queue/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,90 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ._constants import X_MS_VERSION
-
-
-class QueueSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating queue shared access
-    signature tokens with a common account name and account key. Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key to generate the shared access signatures.
-        '''
-        super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_queue(self, queue_name, permission=None,
-                       expiry=None, start=None, id=None,
-                       ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            Name of queue.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, add, update, process.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            QueueService.set_queue_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
-
-        return sas.get_token()
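-
-# A minimal sketch of using the factory directly, as the class docstring
-# suggests (account values are placeholders):
-#
-#   sas = QueueSharedAccessSignature('myaccount', '<account key>')
-#   token = sas.generate_queue('taskqueue', id='worker')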
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,31 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .appendblobservice import AppendBlobService
-from .blockblobservice import BlockBlobService
-from .models import (
-    Container,
-    ContainerProperties,
-    Blob,
-    BlobProperties,
-    BlobBlock,
-    BlobBlockList,
-    PageRange,
-    ContentSettings,
-    CopyProperties,
-    ContainerPermissions,
-    BlobPermissions,
-    _LeaseActions,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    Include,
-    SequenceNumberAction,
-    BlockListType,
-    PublicAccess,
-    BlobPrefix,
-    DeleteSnapshot,
-)
-from .pageblobservice import PageBlobService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '1.2.0rc1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-11-09'
-
-# internal configurations, should not be changed
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,442 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.common import AzureException
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _decode_base64_to_text,
-    _to_str,
-    _get_content_md5
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _to_int,
-    _parse_metadata,
-    _convert_xml_to_signed_identifiers,
-    _bool,
-)
-from .models import (
-    Container,
-    Blob,
-    BlobBlock,
-    BlobBlockList,
-    BlobBlockState,
-    BlobProperties,
-    PageRange,
-    ContainerProperties,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    BlobPrefix,
-)
-from ._encryption import _decrypt_blob
-from ..common.models import _list
-from ..common._error import (
-    _validate_content_match,
-    _ERROR_DECRYPTION_FAILURE,
-)
-
-
-def _parse_base_properties(response):
-    '''
-    Extracts basic response headers.
-    '''
-    resource_properties = ResourceProperties()
-    resource_properties.last_modified = parser.parse(response.headers.get('last-modified'))
-    resource_properties.etag = response.headers.get('etag')
-
-    return resource_properties
-
-
-def _parse_page_properties(response):
-    '''
-    Extracts page response headers.
-    '''
-    put_page = PageBlobProperties()
-    put_page.last_modified = parser.parse(response.headers.get('last-modified'))
-    put_page.etag = response.headers.get('etag')
-    put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number'))
-
-    return put_page
-
-
-def _parse_append_block(response):
-    '''
-    Extracts append block response headers.
-    '''
-    append_block = AppendBlockProperties()
-    append_block.last_modified = parser.parse(response.headers.get('last-modified'))
-    append_block.etag = response.headers.get('etag')
-    append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset'))
-    append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count'))
-
-    return append_block
-
-
-def _parse_snapshot_blob(response, name):
-    '''
-    Extracts snapshot return header.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_blob(response, name, snapshot)
-
-
-def _parse_lease(response):
-    '''
-    Extracts lease time and ID return headers.
-    '''
-    lease = {'time': response.headers.get('x-ms-lease-time')}
-    if lease['time']:
-        lease['time'] = _to_int(lease['time'])
-
-    lease['id'] = response.headers.get('x-ms-lease-id')
-
-    return lease
-
-
-def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
-                key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, BlobProperties)
-
-    # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
-    content_settings = getattr(props, 'content_settings')
-    if 'content-range' in response.headers:
-        if 'x-ms-blob-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    if key_encryption_key is not None or key_resolver_function is not None:
-        try:
-            response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
-                                          response, start_offset, end_offset)
-        except Exception:
-            raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-    return Blob(name, snapshot, response.body, props, metadata)
-
-
-def _parse_container(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ContainerProperties)
-    return Container(name, props, metadata)
-
-
-def _convert_xml_to_signed_identifiers_and_access(response):
-    acl = _convert_xml_to_signed_identifiers(response)
-    acl.public_access = response.headers.get('x-ms-blob-public-access')
-
-    return acl
-
-
-def _convert_xml_to_containers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Containers>
-        <Container>
-          <Name>container-name</Name>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <LeaseStatus>locked | unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <PublicAccess>blob | container</PublicAccess>
-            <HasImmutabilityPolicy>true | false</HasImmutabilityPolicy>
-            <HasLegalHold>true | false</HasLegalHold>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Container>
-      </Containers>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    containers = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
-
-    containers_element = list_element.find('Containers')
-
-    for container_element in containers_element.findall('Container'):
-        # Name element
-        container = Container()
-        container.name = container_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = container_element.find('Metadata')
-        if metadata_root_element is not None:
-            container.metadata = dict()
-            for metadata_element in metadata_root_element:
-                container.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = container_element.find('Properties')
-        container.properties.etag = properties_element.findtext('Etag')
-        container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        container.properties.lease_status = properties_element.findtext('LeaseStatus')
-        container.properties.lease_state = properties_element.findtext('LeaseState')
-        container.properties.lease_duration = properties_element.findtext('LeaseDuration')
-        container.properties.public_access = properties_element.findtext('PublicAccess')
-        container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy')
-        container.properties.has_legal_hold = properties_element.findtext('HasLegalHold')
-
-        # Add container to list
-        containers.append(container)
-
-    return containers
-
-
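-# Maps each ListBlobs property-element tag to a (parent attribute, property
-# name, converter) triple. A parent of None sets the value directly on
-# blob.properties; otherwise it targets the named sub-object (for example
-# content_settings, lease, or copy), as _convert_xml_to_blob_list does below.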
-LIST_BLOBS_ATTRIBUTE_MAP = {
-    'Last-Modified': (None, 'last_modified', parser.parse),
-    'Etag': (None, 'etag', _to_str),
-    'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int),
-    'BlobType': (None, 'blob_type', _to_str),
-    'Content-Length': (None, 'content_length', _to_int),
-    'ServerEncrypted': (None, 'server_encrypted', _bool),
-    'Content-Type': ('content_settings', 'content_type', _to_str),
-    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
-    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
-    'Content-Language': ('content_settings', 'content_language', _to_str),
-    'Content-MD5': ('content_settings', 'content_md5', _to_str),
-    'Cache-Control': ('content_settings', 'cache_control', _to_str),
-    'LeaseStatus': ('lease', 'status', _to_str),
-    'LeaseState': ('lease', 'state', _to_str),
-    'LeaseDuration': ('lease', 'duration', _to_str),
-    'CopyId': ('copy', 'id', _to_str),
-    'CopySource': ('copy', 'source', _to_str),
-    'CopyStatus': ('copy', 'status', _to_str),
-    'CopyProgress': ('copy', 'progress', _to_str),
-    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
-    'CopyStatusDescription': ('copy', 'status_description', _to_str),
-    'AccessTier': (None, 'blob_tier', _to_str),
-    'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
-    'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
-    'ArchiveStatus': (None, 'rehydration_status', _to_str),
-    'DeletedTime': (None, 'deleted_time', parser.parse),
-    'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int),
-    'Creation-Time': (None, 'creation_time', parser.parse),
-}
-
-
-def _convert_xml_to_blob_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Delimiter>string-value</Delimiter>
-      <Blobs>
-        <Blob>
-          <Name>blob-name</Name>
-          <Deleted>true</Deleted>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date-time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Content-Length>size-in-bytes</Content-Length>
-            <Content-Type>blob-content-type</Content-Type>
-            <Content-Encoding />
-            <Content-Language />
-            <Content-MD5 />
-            <Cache-Control />
-            <x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
-            <BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
-            <LeaseStatus>locked|unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <CopyId>id</CopyId>
-            <CopyStatus>pending | success | aborted | failed </CopyStatus>
-            <CopySource>source url</CopySource>
-            <CopyProgress>bytes copied/bytes total</CopyProgress>
-            <CopyCompletionTime>datetime</CopyCompletionTime>
-            <CopyStatusDescription>error string</CopyStatusDescription>
-            <AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
-            <AccessTierChangeTime>date-time-value</AccessTierChangeTime>
-            <AccessTierInferred>true</AccessTierInferred>
-            <DeletedTime>datetime</DeletedTime>
-            <RemainingRetentionDays>int</RemainingRetentionDays>
-            <Creation-Time>date-time-value</Creation-Time>
-          </Properties>
-          <Metadata>   
-            <Name>value</Name>
-          </Metadata>
-        </Blob>
-        <BlobPrefix>
-          <Name>blob-prefix</Name>
-        </BlobPrefix>
-      </Blobs>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    blob_list = _list()
-    list_element = ETree.fromstring(response.body)
-
-    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
-
-    blobs_element = list_element.find('Blobs')
-    # findall() always returns a list (never None), so iterate over it directly
-    for blob_prefix_element in blobs_element.findall('BlobPrefix'):
-        prefix = BlobPrefix()
-        prefix.name = blob_prefix_element.findtext('Name')
-        blob_list.append(prefix)
-
-    for blob_element in blobs_element.findall('Blob'):
-        blob = Blob()
-        blob.name = blob_element.findtext('Name')
-        blob.snapshot = blob_element.findtext('Snapshot')
-
-        deleted = blob_element.findtext('Deleted')
-        if deleted:
-            blob.deleted = _bool(deleted)
-
-        # Properties
-        properties_element = blob_element.find('Properties')
-        if properties_element is not None:
-            for property_element in properties_element:
-                info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
-                if info is None:
-                    setattr(blob.properties, property_element.tag, _to_str(property_element.text))
-                elif info[0] is None:
-                    setattr(blob.properties, info[1], info[2](property_element.text))
-                else:
-                    attr = getattr(blob.properties, info[0])
-                    setattr(attr, info[1], info[2](property_element.text))
-
-        # Metadata
-        metadata_root_element = blob_element.find('Metadata')
-        if metadata_root_element is not None:
-            blob.metadata = dict()
-            for metadata_element in metadata_root_element:
-                blob.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add blob to list
-        blob_list.append(blob)
-
-    return blob_list
-
-
-def _convert_xml_to_block_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <CommittedBlocks>
-         <Block>
-            <Name>base64-encoded-block-id</Name>
-            <Size>size-in-bytes</Size>
-         </Block>
-      </CommittedBlocks>
-      <UncommittedBlocks>
-        <Block>
-          <Name>base64-encoded-block-id</Name>
-          <Size>size-in-bytes</Size>
-        </Block>
-      </UncommittedBlocks>
-     </BlockList>
-
-    Converts xml response to block list class.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    block_list = BlobBlockList()
-
-    list_element = ETree.fromstring(response.body)
-
-    committed_blocks_element = list_element.find('CommittedBlocks')
-    if committed_blocks_element is not None:
-        for block_element in committed_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
-            block._set_size(block_size)
-            block_list.committed_blocks.append(block)
-
-    uncommitted_blocks_element = list_element.find('UncommittedBlocks')
-    if uncommitted_blocks_element is not None:
-        for block_element in uncommitted_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
-            block._set_size(block_size)
-            block_list.uncommitted_blocks.append(block)
-
-    return block_list
-
-
-def _convert_xml_to_page_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <PageList>
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-       <ClearRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </ClearRange> 
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-    </PageList> 
-    '''
-    if response is None or response.body is None:
-        return None
-
-    page_list = list()
-
-    list_element = ETree.fromstring(response.body)
-
-    for page_range_element in list_element:
-        if page_range_element.tag == 'PageRange':
-            is_cleared = False
-        elif page_range_element.tag == 'ClearRange':
-            is_cleared = True
-        else:
-            continue  # skip any unrecognized page range types
-
-        page_list.append(
-            PageRange(
-                int(page_range_element.findtext('Start')),
-                int(page_range_element.findtext('End')),
-                is_cleared
-            )
-        )
-
-    return page_list
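
For reference, the deserialization helpers in this removed module all follow one
pattern: LIST_BLOBS_ATTRIBUTE_MAP maps each XML tag to a (sub-object, attribute,
converter) triple, and the parser walks the <Properties> children dispatching
through that map. A minimal standalone sketch of the same dispatch, assuming a
hypothetical _SampleProps model and sample XML:

    from xml.etree import ElementTree as ETree
    from dateutil import parser

    # tag -> (sub-object or None, attribute name, converter)
    ATTRIBUTE_MAP = {
        'Last-Modified': (None, 'last_modified', parser.parse),
        'Content-Length': (None, 'content_length', int),
        'Content-Type': ('content_settings', 'content_type', str),
    }

    class _ContentSettings(object):
        pass

    class _SampleProps(object):
        def __init__(self):
            self.content_settings = _ContentSettings()

    xml = ('<Properties>'
           '<Last-Modified>2018-03-28T00:00:00Z</Last-Modified>'
           '<Content-Length>512</Content-Length>'
           '<Content-Type>text/plain</Content-Type>'
           '</Properties>')

    props = _SampleProps()
    for element in ETree.fromstring(xml):
        group, attr, convert = ATTRIBUTE_MAP.get(element.tag, (None, element.tag, str))
        target = props if group is None else getattr(props, group)
        setattr(target, attr, convert(element.text))

    print(props.last_modified, props.content_length, props.content_settings.content_type)
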
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,127 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-
-
-def _download_blob_chunks(blob_service, container_name, blob_name, snapshot,
-                          download_size, block_size, progress, start_range, end_range,
-                          stream, max_connections, progress_callback, validate_content,
-                          lease_id, if_modified_since, if_unmodified_since, if_match,
-                          if_none_match, timeout, operation_context):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('blob'))
-
-    downloader = _BlobChunkDownloader(
-        blob_service,
-        container_name,
-        blob_name,
-        snapshot,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        lease_id,
-        if_modified_since,
-        if_unmodified_since,
-        if_match,
-        if_none_match,
-        timeout,
-        operation_context,
-    )
-
-    import concurrent.futures
-    executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-    result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-
-class _BlobChunkDownloader(object):
-    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
-                 chunk_size, progress, start_range, end_range, stream,
-                 progress_callback, validate_content, lease_id, if_modified_since,
-                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.snapshot = snapshot
-        self.chunk_size = chunk_size
-
-        self.download_size = download_size
-        self.start_index = start_range
-        self.blob_end = end_range
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-        self.progress_lock = threading.Lock()
-        self.timeout = timeout
-        self.operation_context = operation_context
-
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.if_modified_since = if_modified_since
-        self.if_unmodified_since = if_unmodified_since
-        self.if_match = if_match
-        self.if_none_match = if_none_match
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.blob_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.blob_end:
-            chunk_end = self.blob_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        response = self.blob_service._get_blob(
-            self.container_name,
-            self.blob_name,
-            snapshot=self.snapshot,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            if_modified_since=self.if_modified_since,
-            if_unmodified_since=self.if_unmodified_since,
-            if_match=self.if_match,
-            if_none_match=self.if_none_match,
-            timeout=self.timeout,
-            _context=self.operation_context
-        )
-
-        # This makes sure that if_match is set so that we can validate 
-        # that subsequent downloads are to an unmodified blob
-        self.if_match = response.properties.etag
-        return response
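
The removed chunk downloader reduces to three moving parts: a generator of chunk
offsets, a thread pool mapping a ranged fetch over those offsets, and a lock that
serializes seek-and-write on the shared output stream. A runnable sketch of just
that skeleton, where fetch_range is a stand-in for the ranged _get_blob call:

    import concurrent.futures
    import io
    import threading

    CHUNK = 4
    data = b'abcdefghij'          # stand-in for the remote blob

    def fetch_range(start, end):  # inclusive start, exclusive end
        return data[start:end]

    stream = io.BytesIO(b'\x00' * len(data))
    lock = threading.Lock()

    def offsets(start, end, size):
        index = start
        while index < end:
            yield index
            index += size

    def process_chunk(start):
        end = min(start + CHUNK, len(data))
        chunk = fetch_range(start, end)
        with lock:  # seek+write must be atomic across worker threads
            stream.seek(start)
            stream.write(chunk)

    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
        list(executor.map(process_chunk, offsets(0, len(data), CHUNK)))

    assert stream.getvalue() == data
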
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,187 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from json import (
-    dumps,
-    loads,
-)
-from os import urandom
-
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _generate_AES_CBC_cipher,
-    _dict_to_encryption_data,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-    _ERROR_DATA_NOT_ENCRYPTED,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-)
-
-
-def _encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the _upload_blob_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
-
-def _generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-    
-    :param bytes key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the 
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                  response, start_offset, end_offset):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-    
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key 
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    _validate_not_none('response', response)
-    content = response.body
-    _validate_not_none('content', content)
-
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata']))
-    except:
-        if require_encryption:
-            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
-        else:
-            return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    blob_type = response.headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    start_range, end_range = 0, len(content)
-    if 'content-range' in response.headers:
-        content_range = response.headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        start_range = int(content_range[0])
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def _get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
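
For orientation: the helpers above wrap AES-256-CBC with PKCS7 padding from the
cryptography package. A minimal encrypt/decrypt round trip with the same
primitives, leaving out the key wrapping and metadata that the real code layers
on top:

    from os import urandom

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.primitives.padding import PKCS7

    key = urandom(32)  # 256-bit content encryption key
    iv = urandom(16)   # AES block-sized initialization vector

    def make_cipher():
        return Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())

    # pad to the 128-bit block size, then encrypt
    padder = PKCS7(128).padder()
    padded = padder.update(b'blob bytes') + padder.finalize()
    encryptor = make_cipher().encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    # decrypt, then strip the padding
    decryptor = make_cipher().decryptor()
    unpadder = PKCS7(128).unpadder()
    plaintext = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize()) \
                + unpadder.finalize()
    assert plaintext == b'blob bytes'
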
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_error.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,29 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
-    'Invalid page blob size: {0}. ' + \
-    'The size must be aligned to a 512-byte boundary.'
-
-_ERROR_PAGE_BLOB_START_ALIGNMENT = \
-    'start_range must align with 512 page size'
-
-_ERROR_PAGE_BLOB_END_ALIGNMENT = \
-    'end_range must align with 512 page size'
-
-_ERROR_INVALID_BLOCK_ID = \
-    'All blocks in block list need to have valid block ids.'
-
-_ERROR_INVALID_LEASE_DURATION = \
-    "lease_duration param needs to be between 15 and 60 or -1."
-
-_ERROR_INVALID_LEASE_BREAK_PERIOD = \
-    "lease_break_period param needs to be between 0 and 60."
-
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use the blob chunk downloader, more than 1 thread must be ' + \
-    'used, since get_blob_to_bytes should be called for single-threaded ' + \
-    'blob downloads.'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,118 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from xml.sax.saxutils import escape as xml_escape
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _encode_base64,
-    _str,
-)
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-from ._error import (
-    _ERROR_PAGE_BLOB_START_ALIGNMENT,
-    _ERROR_PAGE_BLOB_END_ALIGNMENT,
-    _ERROR_INVALID_BLOCK_ID,
-)
-from io import BytesIO
-
-
-def _get_path(container_name=None, blob_name=None):
-    '''
-    Creates the path to access a blob resource.
-
-    container_name:
-        Name of container.
-    blob_name:
-        The path to the blob.
-    '''
-    if container_name and blob_name:
-        return '/{0}/{1}'.format(
-            _str(container_name),
-            _str(blob_name))
-    elif container_name:
-        return '/{0}'.format(_str(container_name))
-    else:
-        return '/'
-
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers['x-ms-range'] = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
-
-
-def _convert_block_list_to_xml(block_id_list):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <Committed>first-base64-encoded-block-id</Committed>
-      <Uncommitted>second-base64-encoded-block-id</Uncommitted>
-      <Latest>third-base64-encoded-block-id</Latest>
-    </BlockList>
-
-    Convert a block list to xml to send.
-
-    block_id_list:
-        A list of BlobBlock objects containing the block ids and block states used in put_block_list.
-        Each block is serialized under an element named for its state (Committed, Uncommitted, or Latest).
-    '''
-    if block_id_list is None:
-        return ''
-
-    block_list_element = ETree.Element('BlockList')
-
-    # Enabled
-    for block in block_id_list:
-        if block.id is None:
-            raise ValueError(_ERROR_INVALID_BLOCK_ID)
-        block_id = xml_escape(_str(_encode_base64(block.id)))
-        ETree.SubElement(block_list_element, block.state).text = block_id
-
-    # Add the xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    # return xml value
-    return output
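
The BlockList payload built by _convert_block_list_to_xml needs nothing beyond
the standard library; a small sketch with hypothetical raw block ids:

    import base64
    from io import BytesIO
    from xml.etree import ElementTree as ETree

    # hypothetical (state, raw block id) pairs
    blocks = [('Latest', b'block-000'), ('Committed', b'block-001')]

    root = ETree.Element('BlockList')
    for state, raw_id in blocks:
        ETree.SubElement(root, state).text = base64.b64encode(raw_id).decode('utf-8')

    stream = BytesIO()
    ETree.ElementTree(root).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
    print(stream.getvalue().decode('utf-8'))
    # <?xml version='1.0' encoding='utf-8'?>
    # <BlockList><Latest>YmxvY2stMDAw</Latest><Committed>YmxvY2stMDAx</Committed></BlockList>
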
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,485 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from math import ceil
-from threading import Lock
-
-from ..common._common_conversion import _encode_base64
-from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-from ..common._serialization import (
-    url_quote,
-    _get_data_bytes_only,
-    _len_plus
-)
-from ._encryption import (
-    _get_blob_encryptor_and_padder,
-)
-from .models import BlobBlock
-from ._constants import (
-    _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-)
-
-
-def _upload_blob_chunks(blob_service, container_name, blob_name,
-                        blob_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, lease_id, uploader_class,
-                        maxsize_condition=None, if_match=None, timeout=None,
-                        content_encryption_key=None, initialization_vector=None, resource_properties=None):
-    encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
-                                                       uploader_class is not _PageBlobChunkUploader)
-
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        encryptor,
-        padder
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism, as a ranged upload may start
-    # before the previous one finishes and returns an ETag
-    uploader.if_match = if_match if not max_connections > 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        from threading import BoundedSemaphore
-
-        '''
-        Bounds the chunking so that at most 'max_connections' work items are buffered and submitted to the executor.
-        This is necessary because the executor queue would otherwise keep accepting work items and buffer every
-        block at once. The max_connections + 1 ensures the next chunk is already buffered and ready for when a
-        worker thread becomes available.
-        '''
-        chunk_throttler = BoundedSemaphore(max_connections + 1)
-
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        futures = []
-        running_futures = []
-
-        # Check for exceptions and fail fast.
-        for chunk in uploader.get_chunk_streams():
-            for f in running_futures:
-                if f.done():
-                    if f.exception():
-                        raise f.exception()
-                    else:
-                        running_futures.remove(f)
-
-            chunk_throttler.acquire()
-            future = executor.submit(uploader.process_chunk, chunk)
-
-            # Calls callback upon completion (even if the callback was added after the Future task is done).
-            future.add_done_callback(lambda x: chunk_throttler.release())
-            futures.append(future)
-            running_futures.append(future)
-
-        # result() will wait until completion and also raise any exceptions that may have been set.
-        range_ids = [f.result() for f in futures]
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-
-    if resource_properties:
-        resource_properties.last_modified = uploader.last_modified
-        resource_properties.etag = uploader.etag
-
-    return range_ids
-
-
-def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
-                                  blob_size, block_size, stream, max_connections,
-                                  progress_callback, validate_content, lease_id, uploader_class,
-                                  maxsize_condition=None, if_match=None, timeout=None):
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        None,
-        None
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism, as a ranged upload may start
-    # before the previous one finishes and returns an ETag
-    uploader.if_match = if_match if not max_connections > 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
-    else:
-        range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
-
-    return range_ids
-
-
-class _BlobChunkUploader(object):
-    def __init__(self, blob_service, container_name, blob_name, blob_size,
-                 chunk_size, stream, parallel, progress_callback,
-                 validate_content, lease_id, timeout, encryptor, padder):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.blob_size = blob_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.timeout = timeout
-        self.encryptor = encryptor
-        self.padder = padder
-        self.last_modified = None
-        self.etag = None
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.blob_size:
-                    read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                temp = _get_data_bytes_only('temp', temp)
-                data += temp
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if len(data) > 0:
-                    yield index, data
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.blob_size)
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.blob_size
-
-        if blob_length is None:
-            blob_length = _len_plus(self.stream)
-            if blob_length is None:
-                raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            yield ('BlockId{:05d}'.format(i),
-                   _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
-                              lock))
-
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class _BlockBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
-        self.blob_service._put_block(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            block_id,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            timeout=self.timeout,
-        )
-        return BlobBlock(block_id)
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.blob_service._put_block(
-                self.container_name,
-                self.blob_name,
-                block_stream,
-                block_id,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                timeout=self.timeout,
-            )
-        finally:
-            block_stream.close()
-        return BlobBlock(block_id)
-
-
-class _PageBlobChunkUploader(_BlobChunkUploader):
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        for each_byte in chunk_data:
-            if each_byte != 0 and each_byte != b'\x00':
-                return False
-        return True
-
-    def _upload_chunk(self, chunk_start, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_start + len(chunk_data) - 1
-            resp = self.blob_service._update_page(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                chunk_start,
-                chunk_end,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                if_match=self.if_match,
-                timeout=self.timeout,
-            )
-
-            if not self.parallel:
-                self.if_match = resp.etag
-
-            self.set_response_properties(resp)
-
-
-class _AppendBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if not hasattr(self, 'current_length'):
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                timeout=self.timeout,
-            )
-
-            self.current_length = resp.append_offset
-        else:
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                appendpos_condition=self.current_length + chunk_offset,
-                timeout=self.timeout,
-            )
-
-        self.set_response_properties(resp)
-
-
-class _SubStream(IOBase):
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derivations of io.IOBase and thus do not implement seekable().
-        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # only the main thread runs this, so there's no need to grab the lock
-            wrapped_stream.seek(0, SEEK_CUR)
-        except:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \
-            else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, n):
-        if self.closed:
-            raise ValueError("Stream is closed.")
-
-        # adjust if out of bounds
-        if n + self._position >= self._length:
-            n = self._length - self._position
-
-        # return fast
-        if n == 0 or self._buffer.closed:
-            return b''
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(n)
-        bytes_read = len(read_buffer)
-        bytes_remaining = n - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_connections > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence is SEEK_SET:
-            start_index = 0
-        elif whence is SEEK_CUR:
-            start_index = self._position
-        elif whence is SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self):
-        raise UnsupportedOperation
-
-    def writelines(self):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
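
The semaphore pattern in _upload_blob_chunks above is easy to miss: acquire
before every submit, release from the future's done-callback, so at most
max_connections + 1 chunks are buffered ahead of the worker threads instead of
the whole stream. A self-contained sketch, with work standing in for the
per-chunk upload:

    import concurrent.futures
    import threading
    import time

    MAX_CONNECTIONS = 2
    throttle = threading.BoundedSemaphore(MAX_CONNECTIONS + 1)

    def work(i):
        time.sleep(0.01)  # stand-in for the ranged PUT
        return i

    with concurrent.futures.ThreadPoolExecutor(MAX_CONNECTIONS) as executor:
        futures = []
        for chunk in range(10):  # stand-in for get_chunk_streams()
            throttle.acquire()   # block here rather than buffering every chunk
            future = executor.submit(work, chunk)
            future.add_done_callback(lambda f: throttle.release())
            futures.append(future)
        results = [f.result() for f in futures]

    assert results == list(range(10))
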
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/appendblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/appendblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/appendblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/appendblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,556 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _parse_append_block,
-    _parse_base_properties,
-)
-from ._serialization import (
-    _get_path,
-)
-from ._upload_chunking import (
-    _AppendBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class AppendBlobService(BaseBlobService):
-    '''
-    An append blob is composed of blocks and is optimized for append operations.
-    When you modify an append blob, blocks are added to the end of the blob only,
-    via the append_block operation. Updating or deleting of existing blocks is not
-    supported. Unlike a block blob, an append blob does not expose its block IDs. 
-
-    Each block in an append blob can be a different size, up to a maximum of 4 MB,
-    and an append blob can include up to 50,000 blocks. The maximum size of an
-    append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks).
-
-    :ivar int MAX_BLOCK_SIZE: 
-        The size of the blocks put by append_blob_from_* methods. Smaller blocks 
-        may be put if there is less data provided. The maximum block size the service 
-        supports is 4MB.
-    '''
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        self.blob_type = _BlobTypes.AppendBlob
-        super(AppendBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def create_blob(self, container_name, blob_name, content_settings=None,
-                    metadata=None, lease_id=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a blob or overrides an existing blob. Use if_none_match=* to
-        prevent overriding an existing blob. 
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to
-            perform the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def append_block(self, container_name, blob_name, block,
-                     validate_content=False, maxsize_condition=None,
-                     appendpos_condition=None,
-                     lease_id=None, if_modified_since=None,
-                     if_unmodified_since=None, if_match=None,
-                     if_none_match=None, timeout=None):
-        '''
-        Commits a new block of data to the end of an existing append blob.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes block:
-            Content of the block in bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            ETag, last modified, append offset, and committed block count 
-            properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'appendblock',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
-            'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        request.body = _get_data_bytes_only('block', block)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_append_block)
-
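For anyone migrating off the track1 client that this release removes, here is a minimal sketch of append_block in use. It assumes AppendBlobService is importable from the removed azure.multiapi.storage.v2017_11_09.blob package (i.e. a pre-1.5.0 install), that the client exposes a create_blob helper for append blobs, and that all account names, keys, and blob names are placeholders.

    from azure.multiapi.storage.v2017_11_09.blob import AppendBlobService

    # Placeholder credentials and names -- substitute real values.
    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    # Assumed track1 helper: the target must already exist as an append
    # blob before blocks can be committed to it.
    service.create_blob('mycontainer', 'mylog')

    # Commit one block at the tail; validate_content sends a Content-MD5
    # header so the service can verify the bytes in transit, and
    # maxsize_condition fails the call once the blob would exceed 4 MiB.
    props = service.append_block(
        'mycontainer', 'mylog', b'first entry\n',
        validate_content=True,
        maxsize_condition=4 * 1024 * 1024)
    # Attribute names on the returned AppendBlockProperties are assumed.
    print(props.etag, props.append_offset, props.committed_block_count)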
-    # ----Convenience APIs----------------------------------------------
-
-    def append_blob_from_path(
-            self, container_name, blob_name, file_path, validate_content=False,
-            maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file path, with automatic
-        chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.append_blob_from_stream(
-                container_name,
-                blob_name,
-                stream,
-                count=count,
-                validate_content=validate_content,
-                maxsize_condition=maxsize_condition,
-                progress_callback=progress_callback,
-                lease_id=lease_id,
-                timeout=timeout)
-
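Under the same assumptions, a sketch of the path-based convenience wrapper; the file name and progress handler are illustrative.

    import os
    from azure.multiapi.storage.v2017_11_09.blob import AppendBlobService

    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    def report(current, total):
        # total is the blob size, or None when it is unknown
        print('appended {0} of {1} bytes'.format(current, total))

    # Chunks the local file and appends it to an existing append blob,
    # invoking report after each chunk.
    if os.path.exists('audit.log'):
        service.append_blob_from_path(
            'mycontainer', 'mylog', 'audit.log',
            progress_callback=report)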
-    def append_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from an array of bytes, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.append_blob_from_stream(
-            container_name,
-            blob_name,
-            stream,
-            count=count,
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout)
-
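The byte-array variant slices the buffer with index/count before delegating to the stream uploader, as the implementation above shows. A minimal sketch, same assumptions as earlier:

    from azure.multiapi.storage.v2017_11_09.blob import AppendBlobService

    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    payload = b'0123456789'
    # index/count select a slice: this appends bytes 2..7, i.e. b'234567'.
    service.append_blob_from_bytes(
        'mycontainer', 'mylog', payload, index=2, count=6)
    # An omitted or negative count means "everything from index onwards".
    service.append_blob_from_bytes('mycontainer', 'mylog', payload, index=8)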
-    def append_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from str/unicode, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.append_blob_from_bytes(
-            container_name,
-            blob_name,
-            text,
-            index=0,
-            count=len(text),
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout)
-
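The text variant simply encodes and forwards to append_blob_from_bytes, per the isinstance check above, so a sketch only needs to show the encoding hook (same assumptions as earlier):

    from azure.multiapi.storage.v2017_11_09.blob import AppendBlobService

    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    # Unicode text is encoded with the given codec before upload;
    # bytes input skips encoding entirely.
    service.append_blob_from_text(
        'mycontainer', 'mylog', u'caf\u00e9 entry\n', encoding='utf-8')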
-    def append_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None):
-        '''
-        Appends to the content of an existing blob from a file/stream, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        # _upload_blob_chunks returns the block ids for block blobs so resource_properties
-        # is passed as a parameter to get the last_modified and etag for page and append blobs.
-        # this info is not needed for block blobs since _put_block_list is called afterwards, which returns this info
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_BLOCK_SIZE,
-            stream=stream,
-            max_connections=1,  # upload not easily parallelizable
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_AppendBlobChunkUploader,
-            maxsize_condition=maxsize_condition,
-            timeout=timeout,
-            resource_properties=resource_properties
-        )
-
-        return resource_properties
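Finally, the stream uploader all of the wrappers above funnel into; note max_connections is pinned to 1 because appends must stay ordered. A sketch under the same assumptions, with the returned attribute names assumed:

    import io
    from azure.multiapi.storage.v2017_11_09.blob import AppendBlobService

    service = AppendBlobService(account_name='myaccount', account_key='mykey')

    data = io.BytesIO(b'streamed entry\n')
    # count is optional but, per the docstring, supplying it helps performance.
    props = service.append_blob_from_stream(
        'mycontainer', 'mylog', data, count=15)
    print(props.etag, props.last_modified)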
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/baseblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/baseblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/baseblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/baseblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,3245 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from abc import ABCMeta
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-    _StorageNoAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _parse_metadata,
-    _parse_properties,
-    _convert_xml_to_service_stats,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_decryption_required,
-    _validate_access_policies,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    BlobSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_containers,
-    _parse_blob,
-    _convert_xml_to_blob_list,
-    _parse_container,
-    _parse_snapshot_blob,
-    _parse_lease,
-    _convert_xml_to_signed_identifiers_and_access,
-    _parse_base_properties,
-)
-from ._download_chunking import _download_blob_chunks
-from ._error import (
-    _ERROR_INVALID_LEASE_DURATION,
-    _ERROR_INVALID_LEASE_BREAK_PERIOD,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from .models import (
-    BlobProperties,
-    _LeaseActions,
-    ContainerPermissions,
-    BlobPermissions,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class BaseBlobService(StorageClient):
-    '''
-    This is the main class managing Blob resources.
-
-    The Blob service stores text and binary data as blobs in the cloud.
-    The Blob service offers the following three resources: the storage account,
-    containers, and blobs. Within your storage account, containers provide a
-    way to organize sets of blobs. For more information please see:
-    https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_blob_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        blob is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_blob_to_* methods if
-        max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the blob is smaller than
-        this. If this is set to larger than 4MB, content_validation will throw an
-        error if enabled. However, if content_validation is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all data successfully uploaded to the service and all data downloaded and
-        successfully read from the service are/were encrypted while on the server. If this flag is set, all required
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    __metaclass__ = ABCMeta
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'blob',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            token_credential=token_credential,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            custom_domain=custom_domain,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(BaseBlobService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-                self.is_emulated
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        elif self.token_credential:
-            self.authentication = self.token_credential
-        else:
-            self.authentication = _StorageNoAuthentication()
-
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None):
-        '''
-        Creates the url to access a blob.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :param str snapshot:
-            A string value that uniquely identifies the snapshot. The value of
-            this query parameter indicates the snapshot version.
-        :return: blob access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}/{}'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-            blob_name,
-        )
-
-        if snapshot and sas_token:
-            url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token)
-        elif snapshot:
-            url = '{}?snapshot={}'.format(url, snapshot)
-        elif sas_token:
-            url = '{}?{}'.format(url, sas_token)
-
-        return url
-
-    def make_container_url(self, container_name, protocol=None, sas_token=None):
-        '''
-        Creates the url to access a container.
-
-        :param str container_name:
-            Name of container.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: container access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}?restype=container'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-        )
-
-        if sas_token:
-            url = '{}&{}'.format(url, sas_token)
-
-        return url
-
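BaseBlobService is abstract (note the ABCMeta above), so these URL helpers are reached through a concrete subclass; BlockBlobService is assumed to exist in the same removed package. A sketch with placeholder names and truncated tokens:

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # e.g. https://myaccount.blob.core.windows.net/mycontainer/myblob
    print(service.make_blob_url('mycontainer', 'myblob'))

    # snapshot and sas_token are appended as query parameters when given.
    print(service.make_blob_url('mycontainer', 'myblob',
                                snapshot='2017-11-09T00:00:00.0000000Z',
                                sas_token='sv=...&sig=...'))

    # Container URLs always carry restype=container, so a SAS token is
    # joined with '&' rather than '?'.
    print(service.make_container_url('mycontainer', sas_token='sv=...&sig=...'))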
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the blob service.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.BLOB, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
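A sketch of minting an account-level SAS and handing it to a second, key-less client. ResourceTypes and AccountPermissions are assumed to be exported by the removed common.models module; everything else follows the docstring above.

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    # Assumed export location within the removed package layout:
    from azure.multiapi.storage.v2017_11_09.common.models import (
        AccountPermissions,
        ResourceTypes,
    )

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # Read-only token scoped to containers and objects, valid for one
    # hour and restricted to HTTPS.
    token = service.generate_account_shared_access_signature(
        ResourceTypes(container=True, object=True),
        AccountPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
        protocol='https')

    # The token then authenticates a client that holds no account key.
    reader = BlockBlobService(account_name='myaccount', sas_token=token)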
-    def generate_container_shared_access_signature(self, container_name,
-                                                   permission=None, expiry=None,
-                                                   start=None, id=None, ip=None, protocol=None,
-                                                   cache_control=None, content_disposition=None,
-                                                   content_encoding=None, content_language=None,
-                                                   content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_container(
-            container_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
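Container-level SAS can be ad hoc or reference a stored access policy; ContainerPermissions is imported by this module itself (see the .models import above), so only the placeholder values are assumed.

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import ContainerPermissions

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # Ad-hoc token: permission and expiry travel inside the signature.
    token = service.generate_container_shared_access_signature(
        'mycontainer',
        permission=ContainerPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(days=1))

    # Policy-based token: permission/expiry live server-side under the
    # id, so they must be omitted here (per the docstring above).
    policy_token = service.generate_container_shared_access_signature(
        'mycontainer', id='mypolicy')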
-    def generate_blob_shared_access_signature(
-            self, container_name, blob_name, permission=None,
-            expiry=None, start=None, id=None, ip=None, protocol=None,
-            cache_control=None, content_disposition=None,
-            content_encoding=None, content_language=None,
-            content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_container_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_blob(
-            container_name,
-            blob_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
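Blob-level SAS with a response-header override, combined with make_blob_url to produce a shareable link; BlobPermissions comes from the same .models module this file imports, and the names are placeholders.

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import BlobPermissions

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # One-hour read-only link that forces a download file name.
    token = service.generate_blob_shared_access_signature(
        'mycontainer', 'myblob',
        permission=BlobPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
        content_disposition='attachment; filename=report.csv')

    url = service.make_blob_url('mycontainer', 'myblob', sas_token=token)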
-    def list_containers(self, prefix=None, num_results=None, include_metadata=False,
-                        marker=None, timeout=None):
-        '''
-        Returns a generator to list the containers under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        containers, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token, which should be followed to get additional results.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_containers(**kwargs)
-
-        return ListGenerator(resp, self._list_containers, (), kwargs)
-
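The listing generator hides continuation internally but exposes next_marker for bounded paging, as described above. A sketch, same assumptions as the earlier BlockBlobService examples:

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # Lazily follows continuation tokens until the account is exhausted.
    for container in service.list_containers(prefix='logs-',
                                             include_metadata=True):
        print(container.name, container.metadata)

    # Bounded page: once the generator finishes, next_marker lets a
    # later call resume where this one stopped.
    page = service.list_containers(num_results=5)
    names = [c.name for c in page]
    more = service.list_containers(num_results=5, marker=page.next_marker)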
-    def _list_containers(self, prefix=None, marker=None, max_results=None,
-                         include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the containers under the specified account.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token, which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the container's
-            metadata be returned as part of the response body. set this
-            parameter to string 'metadata' to get container's metadata.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_containers, operation_context=_context)
-
-    def create_container(self, container_name, metadata=None,
-                         public_access=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails if
-        fail_on_exist is True.
-
-        :param str container_name:
-            Name of container to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the container exists.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is created, False if container already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
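create_container's fail_on_exist switch turns "already exists" into a boolean instead of an exception. PublicAccess is assumed to be exported alongside the other blob models; since the header is built via _to_str, the raw string 'blob' would also work.

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import PublicAccess

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # Returns False rather than raising when the container already exists.
    created = service.create_container(
        'mycontainer',
        metadata={'Category': 'test'},
        public_access=PublicAccess.Blob)  # assumed enum; 'blob' also works
    print('created' if created else 'already existed')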
-    def get_container_properties(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: properties for the specified container within a container object.
-        :rtype: :class:`~azure.storage.blob.models.Container`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_container, [container_name])
-
-    def get_container_metadata(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the container metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_container_metadata(self, container_name, metadata=None,
-                               lease_id=None, if_modified_since=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param str container_name:
-            Name of existing container.
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as 
-            metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
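Because each set_container_metadata call replaces the whole metadata set, extending it is a read-modify-write. A sketch under the usual assumptions:

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # Read-modify-write, since every call replaces all existing metadata.
    meta = service.get_container_metadata('mycontainer')
    meta['owner'] = 'ops'
    service.set_container_metadata('mycontainer', metadata=meta)

    # Calling with no dict clears all metadata from the container.
    service.set_container_metadata('mycontainer')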
-    def get_container_acl(self, container_name, lease_id=None, timeout=None):
-        '''
-        Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the container, as a dict of str to
-            :class:`azure.storage.common.models.AccessPolicy`, plus a public_access property
-            if public access is turned on.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access)
-
-    def set_container_acl(self, container_name, signed_identifiers=None,
-                          public_access=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Sets the permissions for the specified container or stored access 
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param str lease_id:
-            If specified, set_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified date/time.
-        :param datetime if_unmodified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        return self._perform_request(request, _parse_base_properties)
-
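Stored access policies ride on set_container_acl (at most 5 per container, and an empty dict clears them). AccessPolicy is assumed to be exported by the removed common.models module, with the permission string ordered read/write/delete/list as the SAS docstrings above require.

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.common.models import AccessPolicy

    service = BlockBlobService(account_name='myaccount', account_key='mykey')

    # 'rl' = read + list, in the required read/write/delete/list order.
    policy = AccessPolicy(permission='rl',
                          expiry=datetime.utcnow() + timedelta(days=7))
    service.set_container_acl('mycontainer',
                              signed_identifiers={'mypolicy': policy})

    # get_container_acl returns the policies as a dict keyed by id.
    acl = service.get_container_acl('mycontainer')
    print(acl['mypolicy'].expiry)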
-    def delete_container(self, container_name, fail_not_exist=False,
-                         lease_id=None, if_modified_since=None,
-                         if_unmodified_since=None, timeout=None):
-        '''
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :param str container_name:
-            Name of container to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the container doesn't
-            exist.
-        :param str lease_id:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if the container is deleted, False if the container doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
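-
-    # Illustrative sketch (editor's addition): with fail_not_exist=False (the
-    # default), a missing container yields False instead of an exception.
-    def _example_delete_container(self, container_name):
-        deleted = self.delete_container(container_name)
-        if not deleted:
-            print('container {0} did not exist'.format(container_name))
-        return deleted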
-
-    def _lease_container_impl(
-            self, container_name, lease_action, lease_id, lease_duration,
-            lease_break_period, proposed_lease_id, if_modified_since,
-            if_unmodified_since, timeout):
-        '''
-        Establishes and manages a lease on a container.
-        The Lease Container operation can be called in one of five modes:
-            Acquire, to request a new lease
-            Renew, to renew an existing lease
-            Change, to change the ID of an existing lease
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the container
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_action:
-            Possible _LeaseActions values: acquire|renew|release|break|change
-        :param str lease_id:
-            Required if the container has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. For backwards compatibility, the default is
-            60, and the value is only used on an acquire operation.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for Acquire, required for Change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_container_lease(
-            self, container_name, lease_duration=-1, proposed_lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Requests a new lease. If the container does not have an active lease,
-        the Blob service creates a lease on the container and returns a new
-        lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Acquire,
-                                           None,  # lease_id
-                                           lease_duration,
-                                           None,  # lease_break_period
-                                           proposed_lease_id,
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
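-
-    # Illustrative sketch (editor's addition): acquire a short fixed lease, do
-    # work under it, then release so other clients can lease immediately.
-    def _example_lease_cycle(self, container_name):
-        lease_id = self.acquire_container_lease(container_name,
-                                                lease_duration=15)
-        try:
-            # Operations such as delete_container now require this lease_id.
-            self.renew_container_lease(container_name, lease_id)
-        finally:
-            self.release_container_lease(container_name, lease_id)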
-
-    def renew_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified
-        matches that associated with the container. Note that
-        the lease may be renewed even if it has expired as long as the container
-        has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Renew,
-                                           lease_id,
-                                           None,  # lease_duration
-                                           None,  # lease_break_period
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
-
-    def release_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Releases the lease. The lease may be released if the lease_id specified matches
-        that associated with the container. Releasing the lease allows another client
-        to immediately acquire the lease for the container as soon as the release is complete. 
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Release,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   None,  # proposed_lease_id
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
-    def break_container_lease(
-            self, container_name, lease_break_period=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Breaks the lease if the container has an active lease. Once a lease is
-        broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired. 
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_break_period:
-            This is the proposed duration of seconds that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Break,
-                                           None,  # lease_id
-                                           None,  # lease_duration
-                                           lease_break_period,
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['time']
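-
-    # Illustrative sketch (editor's addition): break a lease without knowing
-    # its ID, wait out the interval the service reports, then acquire anew.
-    def _example_break_and_reacquire(self, container_name):
-        import time
-        remaining = self.break_container_lease(container_name,
-                                               lease_break_period=0)
-        time.sleep(remaining)
-        return self.acquire_container_lease(container_name)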
-
-    def change_container_lease(
-            self, container_name, lease_id, proposed_lease_id,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Changes the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Change,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   proposed_lease_id,
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
-    def list_blobs(self, container_name, prefix=None, num_results=None, include=None,
-                   delimiter=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all blobs have been returned or num_results is reached.
-
-        If num_results is specified and the container has more than that number of
-        blobs, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param ~azure.storage.blob.models.Include include:
-            Specifies one or more additional datasets to include in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
-            result list that acts as a placeholder for all blobs whose names begin
-            with the same substring up to the appearance of the delimiter character.
-            The delimiter may be a single character or a string.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (container_name,)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'delimiter': delimiter, 'timeout': timeout,
-                  '_context': operation_context}
-        resp = self._list_blobs(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_blobs, args, kwargs)
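-
-    # Illustrative sketch (editor's addition): the generator follows
-    # continuation tokens on its own; next_marker only matters when num_results
-    # caps a pass. The 'logs/' prefix is a hypothetical example value.
-    def _example_list_blob_names(self, container_name):
-        page = self.list_blobs(container_name, prefix='logs/', num_results=100)
-        names = [b.name for b in page]
-        if page.next_marker:
-            # Resume where the capped generator stopped.
-            rest = self.list_blobs(container_name, prefix='logs/',
-                                   marker=page.next_marker)
-            names.extend(b.name for b in rest)
-        return names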
-
-    def _list_blobs(self, container_name, prefix=None, marker=None,
-                    max_results=None, include=None, delimiter=None, timeout=None,
-                    _context=None):
-        '''
-        Returns the list of blobs under the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str include:
-            Specifies one or more datasets to include in the
-            response. To specify more than one of these options on the URI,
-            you must separate each option with a comma. Valid values are:
-                snapshots:
-                    Specifies that snapshots should be included in the
-                    enumeration. Snapshots are listed from oldest to newest in
-                    the response.
-                metadata:
-                    Specifies that blob metadata be returned in the response.
-                uncommittedblobs:
-                    Specifies that blobs for which blocks have been uploaded,
-                    but which have not been committed using Put Block List
-                    (REST API), be included in the response.
-                copy:
-                    Version 2012-02-12 and newer. Specifies that metadata
-                    related to any current or previous Copy Blob operation
-                    should be included in the response.
-                deleted:
-                    Version 2017-07-29 and newer. Specifies that soft deleted blobs
-                    which are retained by the service should be included
-                    in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a
-            placeholder for all blobs whose names begin with the same
-            substring up to the appearance of the delimiter character. The
-            delimiter may be a single character or a string.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'delimiter': _to_str(delimiter),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context)
-
-    def get_blob_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Blob service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durably
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read, 
-        create, update, or delete data is the primary storage account location. 
-        The primary location exists in the region you choose at the time you 
-        create an account via the Azure classic portal, for
-        example, North Central US. The location to which your data is replicated 
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
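-
-    # Illustrative sketch (editor's addition): gauge secondary freshness before
-    # reading from the secondary endpoint; requires an RA-GRS account.
-    def _example_secondary_is_current(self):
-        stats = self.get_blob_service_stats()
-        geo = stats.geo_replication
-        return geo.status == 'live', geo.last_sync_time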
-
-    def set_blob_service_properties(
-            self, logging=None, hour_metrics=None, minute_metrics=None,
-            cors=None, target_version=None, timeout=None, delete_retention_policy=None):
-        '''
-        Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param logging:
-            Groups the Azure Analytics Logging settings.
-        :type logging:
-            :class:`~azure.storage.common.models.Logging`
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :type hour_metrics:
-            :class:`~azure.storage.common.models.Metrics`
-        :param minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :type minute_metrics:
-            :class:`~azure.storage.common.models.Metrics`
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param str target_version:
-            Indicates the default version to use for requests if an incoming 
-            request's version is not specified. 
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param delete_retention_policy:
-            The delete retention policy specifies whether to retain deleted blobs.
-            It also specifies the number of days and versions of blobs to keep.
-        :type delete_retention_policy:
-            :class:`~azure.storage.common.models.DeleteRetentionPolicy`
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
-                                               cors, target_version, delete_retention_policy))
-
-        self._perform_request(request)
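-
-    # Illustrative sketch (editor's addition): passing None for the other
-    # settings preserves them, so only the delete retention policy changes.
-    # `policy` is assumed to be a DeleteRetentionPolicy from this package's
-    # common models (e.g. enabled=True, days=7).
-    def _example_enable_soft_delete(self, policy):
-        self.set_blob_service_properties(delete_retention_policy=policy)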
-
-    def get_blob_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob :class:`~azure.storage.common.models.ServiceProperties` with an attached
-            target_version property.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def get_blob_properties(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-        Returns :class:`~azure.storage.blob.models.Blob`
-        with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: a blob object including properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_blob, [blob_name, snapshot])
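-
-    # Illustrative sketch (editor's addition): a HEAD request returns size,
-    # ETag, and metadata without downloading the blob's content.
-    def _example_blob_size_and_etag(self, container_name, blob_name):
-        blob = self.get_blob_properties(container_name, blob_name)
-        return blob.properties.content_length, blob.properties.etag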
-
-    def set_blob_properties(
-            self, container_name, blob_name, content_settings=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Sets system properties on the blob. If one property is set for the
-        content_settings, all properties will be overridden.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
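-
-    # Illustrative sketch (editor's addition): since a partial ContentSettings
-    # overwrites every content property, read the current settings first and
-    # modify only the field that should change.
-    def _example_set_content_type(self, container_name, blob_name):
-        blob = self.get_blob_properties(container_name, blob_name)
-        settings = blob.properties.content_settings
-        settings.content_type = 'application/json'
-        return self.set_blob_properties(container_name, blob_name,
-                                        content_settings=settings)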
-
-    def exists(self, container_name, blob_name=None, snapshot=None, timeout=None):
-        '''
-        Returns a boolean indicating whether the container exists (if blob_name 
-        is None), or otherwise a boolean indicating whether the blob exists.
-
-        :param str container_name:
-            Name of a container.
-        :param str blob_name:
-            Name of a blob. If None, the container will be checked for existence.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the snapshot.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        try:
-            if blob_name is None:
-                self.get_container_properties(container_name, timeout=timeout)
-            else:
-                self.get_blob_properties(container_name, blob_name, snapshot=snapshot, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
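-
-    # Illustrative sketch (editor's addition): probe the container first, then
-    # the blob inside it; neither call raises on a missing resource.
-    def _example_exists(self, container_name, blob_name):
-        return (self.exists(container_name) and
-                self.exists(container_name, blob_name))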
-
-    def _get_blob(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            _context=None):
-        '''
-        Downloads a blob's content, metadata, and properties. You can also
-        call this API to read a snapshot. You can specify a range if you don't
-        need to download the blob in its entirety. If no range is specified,
-        the full blob will be downloaded.
-
-        See get_blob_to_* for high level functions that handle the download
-        of large blobs with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_decryption_required(self.require_encryption,
-                                      self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        start_offset, end_offset = 0, 0
-        if self.key_encryption_key is not None or self.key_resolver_function is not None:
-            if start_range is not None:
-                # Align the start of the range along a 16 byte block
-                start_offset = start_range % 16
-                start_range -= start_offset
-
-                # Include an extra 16 bytes for the IV if necessary
-                # Because of the previous offsetting, start_range will always
-                # be a multiple of 16.
-                if start_range > 0:
-                    start_offset += 16
-                    start_range -= 16
-
-            if end_range is not None:
-                # Align the end of the range along a 16 byte block
-                end_offset = 15 - (end_range % 16)
-                end_range += end_offset
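-            # Worked example (editor's note, not upstream code): for
-            # start_range=20 and end_range=40, start_offset becomes 4 and
-            # start_range drops to 16; since start_range > 0, a further 16
-            # bytes are requested for the IV (start_range=0, start_offset=20).
-            # end_offset = 15 - (40 % 16) = 7 raises end_range to 47, so whole
-            # AES blocks (bytes 0-47) are fetched and the offsets trim the
-            # decrypted plaintext back to the caller's requested range.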
-
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_blob,
-                                     [blob_name, snapshot, validate_content, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function,
-                                      start_offset, end_offset],
-                                     operation_context=_context)
-
-    def get_blob_to_path(
-            self, container_name, blob_name, file_path, open_mode='wb',
-            snapshot=None, start_range=None, end_range=None,
-            validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Downloads a blob to a file path, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str file_path:
-            Path of file to write out to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an append-only
-            open_mode prevents parallel download. So, max_connections must be set 
-            to 1 if this open_mode is used.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the wire
-            if using http instead of https, as https (the default) will already
-            validate. Note that the service will only return transactional MD5s
-            for chunks 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE is set to greater than 4MB, an error will be
-            thrown. As computing the MD5 takes processing time and more requests
-            will need to be made due to the reduced chunk size, there may be some
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            blob = self.get_blob_to_stream(
-                container_name,
-                blob_name,
-                stream,
-                snapshot,
-                start_range,
-                end_range,
-                validate_content,
-                progress_callback,
-                max_connections,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout)
-
-        return blob
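-
-    # Illustrative sketch (editor's addition): chunked download with a progress
-    # callback; max_connections=1 keeps content_md5 on the returned blob.
-    def _example_download_blob(self, container_name, blob_name, file_path):
-        def report(current, total):
-            print('{0}/{1} bytes downloaded'.format(current, total))
-        return self.get_blob_to_path(container_name, blob_name, file_path,
-                                     progress_callback=report,
-                                     max_connections=1)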
-
-    def get_blob_to_stream(
-            self, container_name, blob_name, stream, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param io.IOBase stream:
-            Opened stream to write to.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the wire
-            if using http instead of https, as https (the default) will already
-            validate. Note that the service will only return transactional MD5s
-            for chunks 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE is set to greater than 4MB, an error will be
-            thrown. As computing the MD5 takes processing time and more requests
-            will need to be made due to the reduced chunk size, there may be some
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        if end_range is not None:
-            _validate_not_none("start_range", start_range)
-
-        # If the user explicitly sets max_connections to 1, do a single shot download
-        if max_connections == 1:
-            blob = self._get_blob(container_name,
-                                  blob_name,
-                                  snapshot,
-                                  start_range=start_range,
-                                  end_range=end_range,
-                                  validate_content=validate_content,
-                                  lease_id=lease_id,
-                                  if_modified_since=if_modified_since,
-                                  if_unmodified_since=if_unmodified_since,
-                                  if_match=if_match,
-                                  if_none_match=if_none_match,
-                                  timeout=timeout)
-
-            # Set the download size
-            download_size = blob.properties.content_length
-
-        # If max_connections is greater than 1, do the first get to establish the 
-        # size of the blob and get the first segment of data
-        else:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-            # The service only provides transactional MD5s for chunks 4MB or less.
-            # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first 
-            # chunk so a transactional MD5 can be retrieved.
-            first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-            initial_request_start = start_range if start_range is not None else 0
-
-            if end_range is not None and end_range - start_range < first_get_size:
-                initial_request_end = end_range
-            else:
-                initial_request_end = initial_request_start + first_get_size - 1
-
-            # Send a context object to make sure we always retry to the initial location
-            operation_context = _OperationContext(location_lock=True)
-            try:
-                blob = self._get_blob(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      start_range=initial_request_start,
-                                      end_range=initial_request_end,
-                                      validate_content=validate_content,
-                                      lease_id=lease_id,
-                                      if_modified_since=if_modified_since,
-                                      if_unmodified_since=if_unmodified_since,
-                                      if_match=if_match,
-                                      if_none_match=if_none_match,
-                                      timeout=timeout,
-                                      _context=operation_context)
-
-                # Parse the total blob size and adjust the download size if ranges 
-                # were specified
-                blob_size = _parse_length_from_content_range(blob.properties.content_range)
-                if end_range is not None:
-                    # Use the end_range unless it is over the end of the blob
-                    download_size = min(blob_size, end_range - start_range + 1)
-                elif start_range is not None:
-                    download_size = blob_size - start_range
-                else:
-                    download_size = blob_size
-            except AzureHttpError as ex:
-                if start_range is None and ex.status_code == 416:
-                    # Get range will fail on an empty blob. If the user did not 
-                    # request a range, do a regular get request in order to get 
-                    # any properties.
-                    blob = self._get_blob(container_name,
-                                          blob_name,
-                                          snapshot,
-                                          validate_content=validate_content,
-                                          lease_id=lease_id,
-                                          if_modified_since=if_modified_since,
-                                          if_unmodified_since=if_unmodified_since,
-                                          if_match=if_match,
-                                          if_none_match=if_none_match,
-                                          timeout=timeout,
-                                          _context=operation_context)
-
-                    # Set the download size to empty
-                    download_size = 0
-                else:
-                    raise
-
-        # Mark the first progress chunk. If the blob is small or this is a single 
-        # shot download, this is the only call
-        if progress_callback:
-            progress_callback(blob.properties.content_length, download_size)
-
-        # Write the content to the user stream  
-        # Clear blob content since output has been written to user stream   
-        if blob.content is not None:
-            stream.write(blob.content)
-            blob.content = None
-
-        # If the blob is small or single shot download was used, the download is 
-        # complete at this point. If blob size is large, use parallel download.
-        if blob.properties.content_length != download_size:
-            # Lock on the etag. This can be overridden by the user by specifying '*'
-            if_match = if_match if if_match is not None else blob.properties.etag
-
-            end_blob = blob_size
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the blob
-                end_blob = min(blob_size, end_range + 1)
-
-            _download_blob_chunks(
-                self,
-                container_name,
-                blob_name,
-                snapshot,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_blob,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout,
-                operation_context
-            )
-
-            # Set the content length to the download size instead of the size of 
-            # the last range
-            blob.properties.content_length = download_size
-
-            # Overwrite the content range to the user requested range
-            blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead 
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            blob.properties.content_md5 = None
-
-        return blob
-
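The first-request window above decides whether a ranged download completes in one
shot or falls through to the chunked, parallel path. A minimal, self-contained
sketch of that arithmetic (the constant values are illustrative defaults, not a
contract of the SDK):

MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024  # first request when MD5 validation is off
MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024    # chunk size; also first request when validating

def first_request_window(start_range=None, end_range=None, validate_content=False):
    # Mirrors the logic above: validating forces a <=4MB first chunk so the
    # service can return a transactional MD5 for it.
    first_get_size = MAX_CHUNK_GET_SIZE if validate_content else MAX_SINGLE_GET_SIZE
    start = start_range if start_range is not None else 0
    if end_range is not None and end_range - start < first_get_size:
        end = end_range
    else:
        end = start + first_get_size - 1
    return start, end

assert first_request_window(0, 511) == (0, 511)   # small range: single shot
assert first_request_window() == (0, MAX_SINGLE_GET_SIZE - 1)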
-    def get_blob_to_bytes(
-            self, container_name, blob_name, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the blob. This is primarily valuable for detecting bitflips on the wire 
-            when using http instead of https, since https (the default) already
-            validates. Note that the service will only return transactional MD5s
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        stream = BytesIO()
-        blob = self.get_blob_to_stream(
-            container_name,
-            blob_name,
-            stream,
-            snapshot,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            lease_id,
-            if_modified_since,
-            if_unmodified_since,
-            if_match,
-            if_none_match,
-            timeout)
-
-        blob.content = stream.getvalue()
-        return blob
-
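A usage sketch for the bytes variant, assuming `service` is any client object
exposing these methods; the container and blob names are placeholders:

# Download the first 512 bytes with per-chunk MD5 validation and progress
# reporting; `service` and the names below are hypothetical.
blob = service.get_blob_to_bytes(
    'mycontainer', 'myblob',
    start_range=0, end_range=511,   # inclusive byte range
    validate_content=True,
    progress_callback=lambda current, total: print(current, '/', total),
)
data = blob.content                 # raw bytes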
-    def get_blob_to_text(
-            self, container_name, blob_name, encoding='utf-8', snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str encoding:
-            Python encoding to use when decoding the blob data.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the blob. This is primarily valuable for detecting bitflips on the wire 
-            when using http instead of https, since https (the default) already
-            validates. Note that the service will only return transactional MD5s
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('encoding', encoding)
-
-        blob = self.get_blob_to_bytes(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      start_range,
-                                      end_range,
-                                      validate_content,
-                                      progress_callback,
-                                      max_connections,
-                                      lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        blob.content = blob.content.decode(encoding)
-        return blob
-
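Since the text variant is just the bytes download plus one decode step, the
encoding argument must match how the text was uploaded. A sketch with
placeholder names:

# Hypothetical round trip: read back text that was stored as UTF-16.
blob = service.get_blob_to_text('mycontainer', 'notes.txt', encoding='utf-16')
assert isinstance(blob.content, str)   # decoded unicode, not bytes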
-    def get_blob_metadata(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified blob or snapshot.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the blob metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
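A conditional-fetch sketch: pairing this call with If-None-Match avoids
re-reading metadata that has not changed. This assumes the service surfaces a
failed precondition (304) as AzureHttpError, as this module does elsewhere;
all names and the cached values are placeholders:

from azure.common import AzureHttpError

try:
    metadata = service.get_blob_metadata('mycontainer', 'myblob',
                                         if_none_match=cached_etag)
except AzureHttpError as ex:
    if ex.status_code == 304:          # Not Modified: keep the cached copy
        metadata = cached_metadata
    else:
        raise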
-    def set_blob_metadata(self, container_name, blob_name,
-                          metadata=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None,
-                          if_match=None, if_none_match=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified blob as one or more
-        name-value pairs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
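The replace-all semantics in the docstring are easy to trip over: each call
overwrites the previous metadata set rather than merging into it. A sketch
with placeholder names:

service.set_blob_metadata('mycontainer', 'myblob',
                          {'owner': 'ops', 'tier': 'hot'})
service.set_blob_metadata('mycontainer', 'myblob', {'owner': 'ops'})
assert 'tier' not in service.get_blob_metadata('mycontainer', 'myblob')
service.set_blob_metadata('mycontainer', 'myblob')   # no dict at all: clears everything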
-    def _lease_blob_impl(self, container_name, blob_name,
-                         lease_action, lease_id,
-                         lease_duration, lease_break_period,
-                         proposed_lease_id, if_modified_since,
-                         if_unmodified_since, if_match, if_none_match, timeout=None):
-        '''
-        Establishes and manages a lease on a blob for write and delete operations.
-        The Lease Blob operation can be called in one of five modes:
-            Acquire, to request a new lease.
-            Renew, to renew an existing lease.
-            Change, to change the ID of an existing lease.
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the blob.
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_action:
-            One of the _LeaseActions values: acquire|renew|release|break|change.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for acquire, required for change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_blob_lease(self, container_name, blob_name,
-                           lease_duration=-1,
-                           proposed_lease_id=None,
-                           if_modified_since=None,
-                           if_unmodified_since=None,
-                           if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Requests a new lease. If the blob does not have an active lease, the Blob
-        service creates a lease on the blob and returns a new lease ID.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Acquire,
-                                      None,  # lease_id
-                                      lease_duration,
-                                      None,  # lease_break_period
-                                      proposed_lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
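The guard above encodes the service's duration rule: infinite (-1) or a fixed
15-60 seconds. As a standalone check of the same rule:

def _valid_lease_duration(seconds):
    # -1 means an infinite lease; anything else must be 15..60 seconds.
    return seconds == -1 or 15 <= seconds <= 60

assert _valid_lease_duration(-1) and _valid_lease_duration(15) and _valid_lease_duration(60)
assert not _valid_lease_duration(10)   # would raise _ERROR_INVALID_LEASE_DURATION above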
-    def renew_blob_lease(self, container_name, blob_name,
-                         lease_id, if_modified_since=None,
-                         if_unmodified_since=None, if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified on
-        the request matches that associated with the blob. Note that the lease may
-        be renewed even if it has expired as long as the blob has not been modified
-        or leased again since the expiration of that lease. When you renew a lease,
-        the lease duration clock resets. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Renew,
-                                      lease_id,
-                                      None,  # lease_duration
-                                      None,  # lease_break_period
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
-    def release_blob_lease(self, container_name, blob_name,
-                           lease_id, if_modified_since=None,
-                           if_unmodified_since=None, if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Releases the lease. The lease may be released if the lease ID specified on the
-        request matches that associated with the blob. Releasing the lease allows another
-        client to immediately acquire the lease for the blob as soon as the release is complete. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Release,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              None,  # proposed_lease_id
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
-    def break_blob_lease(self, container_name, blob_name,
-                         lease_break_period=None,
-                         if_modified_since=None,
-                         if_unmodified_since=None,
-                         if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Breaks the lease, if the blob has an active lease. Once a lease is broken,
-        it cannot be renewed. Any authorized request can break the lease; the request
-        is not required to specify a matching lease ID. When a lease is broken,
-        the lease break period is allowed to elapse, during which time no lease operation
-        except break and release can be performed on the blob. When a lease is successfully
-        broken, the response indicates the interval in seconds until a new lease can be acquired. 
-
-        A lease that has been broken can also be released, in which case another client may
-        immediately acquire the lease on the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Break,
-                                      None,  # lease_id
-                                      None,  # lease_duration
-                                      lease_break_period,
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['time']
-
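A sketch of the break window in practice: the return value is the number of
seconds until the blob can be leased again (names are placeholders):

import time

remaining = service.break_blob_lease('mycontainer', 'myblob', lease_break_period=5)
time.sleep(remaining)                  # only break/release are allowed meanwhile
new_lease_id = service.acquire_blob_lease('mycontainer', 'myblob')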
-    def change_blob_lease(self, container_name, blob_name,
-                          lease_id,
-                          proposed_lease_id,
-                          if_modified_since=None,
-                          if_unmodified_since=None,
-                          if_match=None,
-                          if_none_match=None, timeout=None):
-        '''
-        Changes the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Change,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              proposed_lease_id,
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
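Taken together, the wrappers above cover the whole lease lifecycle. An
end-to-end sketch with placeholder names:

import uuid

lease_id = service.acquire_blob_lease('mycontainer', 'myblob', lease_duration=30)
service.renew_blob_lease('mycontainer', 'myblob', lease_id)   # resets the 30s clock
new_id = str(uuid.uuid4())                                    # change requires a GUID
service.change_blob_lease('mycontainer', 'myblob', lease_id, new_id)
service.release_blob_lease('mycontainer', 'myblob', new_id)   # immediately available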
-    def snapshot_blob(self, container_name, blob_name,
-                      metadata=None, if_modified_since=None,
-                      if_unmodified_since=None, if_match=None,
-                      if_none_match=None, lease_id=None, timeout=None):
-        '''
-        Creates a read-only snapshot of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Specifies a user-defined name-value pair associated with the blob.
-            If no name-value pairs are specified, the operation will copy the
-            base blob metadata to the snapshot. If one or more name-value pairs
-            are specified, the snapshot is created with the specified metadata,
-            and metadata is not copied from the base blob.
-        :type metadata: dict(str, str)
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_blob, [blob_name])
-
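The returned snapshot properties carry the opaque snapshot value, which the
download methods above accept. A sketch, assuming the Blob model exposes it as
a .snapshot attribute and using placeholder names:

snap = service.snapshot_blob('mycontainer', 'myblob')
# Read the blob as it was at snapshot time, not its current content.
frozen = service.get_blob_to_bytes('mycontainer', 'myblob',
                                   snapshot=snap.snapshot)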
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation may be a block blob, an append blob, 
-        or a page blob. If the destination blob already exists, it must be of the 
-        same blob type as the source blob. Any existing destination blob will be 
-        overwritten. The destination blob cannot be modified while a copy operation 
-        is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page 
-        blob of the source blob's length, initially containing all zeroes. Then 
-        the source page ranges are enumerated, and non-empty ranges are copied. 
-
-        For a block blob or an append blob, the Blob service creates a committed 
-        blob of zero length before returning from this operation. When copying 
-        from a block blob, all committed blocks and their block IDs are copied. 
-        Uncommitted blocks are not copied. At the end of the copy operation, the 
-        destination blob will have the same committed block count as the source.
-
-        When copying from an append blob, all committed blocks are copied. At the 
-        end of the copy operation, the destination blob will have the same committed 
-        block count as the source.
-
-        For all blob types, you can call get_blob_properties on the destination 
-        blob to check the status of the copy operation. The final blob will be 
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination blob. If one or more name-value 
-            pairs are specified, the destination blob is created with the specified 
-            metadata, and metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.  
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               None,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False)
-
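Because the service copies asynchronously, callers usually poll the destination blob's copy status after starting the copy. A hedged sketch, reusing the placeholder `service` client from the snapshot example above:

    import time

    # The cross-account source must be public or carry a SAS token.
    copy = service.copy_blob(
        'dest-container', 'dest-blob',
        'https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken')

    # Poll until the service reports success, failure, or an abort.
    props = service.get_blob_properties('dest-container', 'dest-blob')
    while props.properties.copy.status == 'pending':
        time.sleep(1)
        props = service.get_blob_properties('dest-container', 'dest-blob')
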
-    def _copy_blob(self, container_name, blob_name, copy_source,
-                   metadata=None,
-                   premium_page_blob_tier=None,
-                   source_if_modified_since=None,
-                   source_if_unmodified_since=None,
-                   source_if_match=None, source_if_none_match=None,
-                   destination_if_modified_since=None,
-                   destination_if_unmodified_since=None,
-                   destination_if_match=None,
-                   destination_if_none_match=None,
-                   destination_lease_id=None,
-                   source_lease_id=None, timeout=None,
-                   incremental_copy=False):
-        '''
-        See copy_blob for more details. This helper method
-        allows for standard copies as well as incremental copies, which are only supported for page blobs.
-        :param bool incremental_copy:
-            If True, performs an incremental copy, which is only supported for page blobs.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source', copy_source)
-
-        if copy_source.startswith('/'):
-            # Backwards compatibility for earlier versions of the SDK where
-            # the copy source can be in the following formats:
-            # - Blob in named container:
-            #     /accountName/containerName/blobName
-            # - Snapshot in named container:
-            #     /accountName/containerName/blobName?snapshot=<DateTime>
-            # - Blob in root container:
-            #     /accountName/blobName
-            # - Snapshot in root container:
-            #     /accountName/blobName?snapshot=<DateTime>
-            account, _, source = \
-                copy_source.partition('/')[2].partition('/')
-            copy_source = self.protocol + '://' + \
-                          self.primary_endpoint + '/' + source
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-
-        if incremental_copy:
-            request.query = {
-                'comp': 'incrementalcopy',
-                'timeout': _int_to_str(timeout),
-            }
-        else:
-            request.query = {'timeout': _int_to_str(timeout)}
-
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-            'x-ms-source-if-modified-since': _to_str(source_if_modified_since),
-            'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since),
-            'x-ms-source-if-match': _to_str(source_if_match),
-            'x-ms-source-if-none-match': _to_str(source_if_none_match),
-            'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since),
-            'If-Match': _to_str(destination_if_match),
-            'If-None-Match': _to_str(destination_if_none_match),
-            'x-ms-lease-id': _to_str(destination_lease_id),
-            'x-ms-source-lease-id': _to_str(source_lease_id),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [BlobProperties]).copy
-
-    def abort_copy_blob(self, container_name, blob_name, copy_id,
-                        lease_id=None, timeout=None):
-        '''
-        Aborts a pending copy_blob operation, and leaves a destination blob
-        with zero length and full metadata.
-
-        :param str container_name:
-            Name of destination container.
-        :param str blob_name:
-            Name of destination blob.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_blob operation.
-        :param str lease_id:
-            Required if the destination blob has an active infinite lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
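A pending copy can be cancelled with the copy ID returned by the original call; the destination is left as a zero-length blob with full metadata. Sketch, same placeholder client as above:

    service.abort_copy_blob('dest-container', 'dest-blob', copy.id)
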
-    def delete_blob(self, container_name, blob_name, snapshot=None,
-                    lease_id=None, delete_snapshots=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Marks the specified blob or snapshot for deletion.
-        The blob is later deleted during garbage collection.
-
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the Delete
-        Blob operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
-        and retains the blob or snapshot for the specified number of days.
-        After that period, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob or snapshot is accessible through the List Blobs API by specifying the include=Include.Deleted option.
-        A soft-deleted blob or snapshot can be restored using the Undelete API.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to delete.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots:
-            Required if the blob has associated snapshots.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-delete-snapshots': _to_str(delete_snapshots),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout)
-        }
-
-        self._perform_request(request)
-
-    def undelete_blob(self, container_name, blob_name, timeout=None):
-        '''
-        The Undelete Blob operation restores the contents and metadata of a soft-deleted blob or snapshot.
-        Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'undelete',
-            'timeout': _int_to_str(timeout)
-        }
-
-        self._perform_request(request)
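delete_blob and undelete_blob together implement the soft-delete round trip described above. A hedged sketch, same placeholder client; DeleteSnapshot is assumed to live in this package's blob models module, mirroring track1:

    from azure.multiapi.storage.v2017_11_09.blob.models import DeleteSnapshot

    # Delete the blob and all of its snapshots in one call.
    service.delete_blob('mycontainer', 'myblob',
                        delete_snapshots=DeleteSnapshot.Include)

    # While the retention period lasts, the blob can be restored in place.
    service.undelete_blob('mycontainer', 'myblob')
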
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/blockblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/blockblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/blockblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/blockblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1007 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from io import (
-    BytesIO
-)
-from os import (
-    path,
-)
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_VALUE_SHOULD_BE_STREAM
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _get_data_bytes_or_stream_only,
-    _add_metadata_headers,
-)
-from ..common._serialization import (
-    _len_plus
-)
-from ._deserialization import (
-    _convert_xml_to_block_list,
-    _parse_base_properties,
-)
-from ._encryption import (
-    _encrypt_blob,
-    _generate_blob_encryption_data,
-)
-from ._serialization import (
-    _convert_block_list_to_xml,
-    _get_path,
-)
-from ._upload_chunking import (
-    _BlockBlobChunkUploader,
-    _upload_blob_chunks,
-    _upload_blob_substream_blocks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-)
-
-
-class BlockBlobService(BaseBlobService):
-    '''
-    Block blobs let you upload large blobs efficiently. Block blobs are comprised
-    of blocks, each of which is identified by a block ID. You create or modify a
-    block blob by writing a set of blocks and committing them by their block IDs.
-    Each block can be a different size, up to a maximum of 100 MB, and a block blob
-    can include up to 50,000 blocks. The maximum size of a block blob is therefore
-    approximately 4.75 TB (100 MB × 50,000 blocks). If you are writing a block
-    blob that is no more than 64 MB in size, you can upload it in its entirety with
-    a single write operation; see create_blob_from_bytes.
-
-    :ivar int MAX_SINGLE_PUT_SIZE:
-        The largest size upload supported in a single put call. This is used by
-        the create_blob_from_* methods if the content length is known and is less
-        than this value.
-    :ivar int MAX_BLOCK_SIZE:
-        The size of the blocks put by create_blob_from_* methods if the content
-        length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks
-        may be put. The maximum block size the service supports is 100MB.
-    :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD:
-        The minimum block size at which the memory-optimized block upload
-        algorithm is considered. This algorithm is only applicable to the create_blob_from_file and
-        create_blob_from_stream methods and will prevent the full buffering of blocks.
-        In addition to the block size, ContentMD5 validation and Encryption must be disabled as
-        these options require the blocks to be buffered.
-    '''
-
-    MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-    MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1
-
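The "approximately 4.75 TB" figure in the class docstring follows directly from these limits, rounded down from binary terabytes:

    >>> 100 * 1024**2 * 50000 / float(1024**4)  # 100 MB blocks x 50,000 blocks
    4.76837158203125
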
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None,
-                 request_session=None, connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given, or if a custom
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign. If neither are
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will
-            override all other parameters besides connection string and request
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        self.blob_type = _BlobTypes.BlockBlob
-        super(BlockBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def put_block(self, container_name, blob_name, block, block_id,
-                  validate_content=False, lease_id=None, timeout=None):
-        '''
-        Creates a new block to be committed as part of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block:
-            Content of the block.
-        :type block: io.IOBase or bytes
-        :param str block_id:
-            A valid Base64 string value that identifies the block. Prior to
-            encoding, the string must be less than or equal to 64 bytes in size.
-            For a given blob, the length of the value specified for the blockid
-            parameter must be the same size for each block. Note that the Base64
-            string must be URL-encoded.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        self._put_block(
-            container_name,
-            blob_name,
-            block,
-            block_id,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            timeout=timeout
-        )
-
-    def put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Writes a blob by specifying the list of block IDs that make up the blob.
-        In order to be written as part of a blob, a block must have been
-        successfully written to the server in a prior Put Block operation.
-
-        You can call Put Block List to update a blob by uploading only those
-        blocks that have changed, then committing the new and existing blocks
-        together. You can do this by specifying whether to commit a block from
-        the committed block list or from the uncommitted block list, or to commit
-        the most recently uploaded version of the block, whichever list it may
-        belong to.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block_list:
-            A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block ids and block state.
-        :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`)
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block list content. The storage
-            service checks the hash of the block list content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this check is associated with
-            the block list content, and not with the content of the blob itself.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._put_block_list(
-            container_name,
-            blob_name,
-            block_list,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
-    def get_block_list(self, container_name, blob_name, snapshot=None,
-                       block_list_type=None, lease_id=None, timeout=None):
-        '''
-        Retrieves the list of blocks that have been uploaded as part of a
-        block blob. There are two block lists maintained for a blob:
-            Committed Block List:
-                The list of blocks that have been successfully committed to a
-                given blob with Put Block List.
-            Uncommitted Block List:
-                The list of blocks that have been uploaded for a blob using
-                Put Block, but that have not yet been committed. These blocks
-                are stored in Azure in association with a blob, but do not yet
-                form part of the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that, when
-            present, specifies the blob snapshot from which to retrieve the block list.
-        :param str block_list_type:
-            Specifies whether to return the list of committed blocks, the list
-            of uncommitted blocks, or both lists together. Valid values are:
-            committed, uncommitted, or all.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: list committed and/or uncommitted blocks for Block Blob
-        :rtype: :class:`~azure.storage.blob.models.BlobBlockList`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'snapshot': _to_str(snapshot),
-            'blocklisttype': _to_str(block_list_type),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_block_list)
-
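put_block, get_block_list, and put_block_list form the manual block workflow: stage blocks, optionally inspect the committed and uncommitted lists, then commit. A hedged sketch with placeholder names, same client as above; per the put_block docstring, ids must have equal length for a given blob:

    from azure.multiapi.storage.v2017_11_09.blob.models import BlobBlock

    chunks = [b'first chunk', b'second chunk']
    blocks = []
    for i, chunk in enumerate(chunks):
        block_id = '{0:06d}'.format(i)  # equal-length ids, as in common track1 samples
        service.put_block('mycontainer', 'staged-blob', chunk, block_id)
        blocks.append(BlobBlock(id=block_id))

    # Staged blocks show up as uncommitted until they are committed below.
    block_lists = service.get_block_list('mycontainer', 'staged-blob',
                                         block_list_type='all')
    print(len(block_lists.uncommitted_blocks), len(block_lists.committed_blocks))

    service.put_block_list('mycontainer', 'staged-blob', blocks)
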
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
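A usage sketch for the path-based helper, same placeholder client; the file path and callback are placeholders, and chunking and parallelism are handled internally:

    def report(current, total):
        if total:
            print('{0:.0%} uploaded'.format(current / float(total)))

    service.create_blob_from_path(
        'mycontainer', 'bigblob', '/tmp/data.bin',
        max_connections=4, progress_callback=report)
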
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, use_byte_buffer=False):
-        '''
-        Creates a new blob from a file/stream, or updates the content of
-        an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB. Note that parallel upload requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :param bool use_byte_buffer:
-            If True, this will force usage of the original full block buffering upload path.
-            By default, this value is False and will employ a memory-efficient,
-            streaming upload algorithm under the following conditions:
-            The provided stream is seekable, 'require_encryption' is False, and
-            MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD.
-            One should consider the drawbacks of using this approach. In order to achieve
-            memory-efficiency, an IOBase stream or file-like object is segmented into logical blocks
-            using a SubStream wrapper. In order to read the correct data, each SubStream must acquire
-            a lock so that it can safely seek to the right position on the shared, underlying stream.
-            If max_connections > 1, the concurrency will result in a considerable amount of seeking on
-            the underlying stream. For the most common inputs such as a file-like stream object, seeking
-            is an inexpensive operation and this is not much of a concern. However, for other variants of streams
-            this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking
-            with your input stream.
-            The SubStream class will attempt to buffer up to 4 MB internally to reduce the number of
-            seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        # Adjust count to include padding if we are expected to encrypt.
-        adjusted_count = count
-        if (self.key_encryption_key is not None) and (adjusted_count is not None):
-            adjusted_count += (16 - (count % 16))
-
-        # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE
-        if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE):
-            if progress_callback:
-                progress_callback(0, count)
-
-            data = stream.read(count)
-            resp = self._put_blob(
-                container_name=container_name,
-                blob_name=blob_name,
-                blob=data,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
-            if progress_callback:
-                progress_callback(count, count)
-
-            return resp
-        else:  # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls
-            cek, iv, encryption_data = None, None, None
-
-            use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \
-                                       self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \
-                                       (hasattr(stream, 'seekable') and not stream.seekable()) or \
-                                       not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
-
-            if use_original_upload_path:
-                if self.key_encryption_key:
-                    cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-                block_ids = _upload_blob_chunks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                    content_encryption_key=cek,
-                    initialization_vector=iv
-                )
-            else:
-                block_ids = _upload_blob_substream_blocks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                )
-
-            return self._put_block_list(
-                container_name=container_name,
-                blob_name=blob_name,
-                block_list=block_ids,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                encryption_data=encryption_data
-            )
-
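Since the memory-efficient substream path requires a seekable stream, a typical caller passes an open binary file and, when known, its size (sketch, placeholder names, same client as above):

    import os

    size = os.path.getsize('/tmp/data.bin')
    with open('/tmp/data.bin', 'rb') as stream:
        service.create_blob_from_stream(
            'mycontainer', 'streamed-blob', stream, count=size)
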
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            use_byte_buffer=True
-        )
-
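The index/count pair selects a slice of the buffer without copying it beforehand (sketch, placeholder names):

    payload = b'header' + b'x' * 1024
    # Upload only the 1024 bytes that follow the 6-byte prefix.
    service.create_blob_from_bytes('mycontainer', 'sliced-blob', payload,
                                   index=6, count=1024)
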
-    def create_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from str/unicode, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.create_blob_from_bytes(
-            container_name=container_name,
-            blob_name=blob_name,
-            blob=text,
-            index=0,
-            count=len(text),
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
-
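
For context, a minimal usage sketch of the wrapper removed above (the account
credentials and container/blob names are placeholders, not part of the source):

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')

    # The text is encoded with the given codec and delegated to
    # create_blob_from_bytes with index=0 and count=len(encoded_text).
    props = service.create_blob_from_text(
        'mycontainer', 'hello.txt', u'hello, world', encoding='utf-8')
    print(props.etag, props.last_modified)
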
-    def set_standard_blob_tier(
-        self, container_name, blob_name, standard_blob_tier, timeout=None):
-        '''
-        Sets the block blob tier on the blob. This API is only supported for block blobs on standard storage accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('standard_blob_tier', standard_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(standard_blob_tier)
-        }
-
-        self._perform_request(request)
-
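
A hedged sketch of a typical call to the tier-setting method above (names are
placeholders; the method issues PUT <blob>?comp=tier with x-ms-access-tier):

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import StandardBlobTier

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    service.set_standard_blob_tier('mycontainer', 'cold-data.bin',
                                   StandardBlobTier.Archive)
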
-    # -----Helper methods------------------------------------
-    def _put_blob(self, container_name, blob_name, blob, content_settings=None,
-                  metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-                  if_unmodified_since=None, if_match=None, if_none_match=None,
-                  timeout=None):
-        '''
-        Creates a blob or updates an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as bytes (size < 64MB). For larger sizes, you
-            must call put_block and put_block_list to set the content of the blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the blob content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the new Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        blob = _get_data_bytes_only('blob', blob)
-        if self.key_encryption_key:
-            encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key)
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-        request.body = blob
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_base_properties)
-
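
The validate_content branch above computes a transactional Content-MD5 over the
request body. A standalone sketch of the equivalent computation, assuming
_get_content_md5 base64-encodes a binary MD5 digest as the HTTP header requires:

    import base64
    import hashlib

    def content_md5(body):
        # Base64-encoded binary MD5 digest, per the Content-MD5 header spec.
        return base64.b64encode(hashlib.md5(body).digest()).decode('utf-8')

    assert content_md5(b'') == '1B2M2Y8AsgTpgAmY7PhCfg=='
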
-    def _put_block(self, container_name, blob_name, block, block_id,
-                   validate_content=False, lease_id=None, timeout=None):
-        '''
-        See put_block for more details. This helper method
-        allows for encryption or other such special behavior because
-        it is safely handled by the library. These behaviors are
-        prohibited in the public version of this function.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_not_none('block_id', block_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'block',
-            'blockid': _encode_base64(_to_str(block_id)),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        request.body = _get_data_bytes_or_stream_only('block', block)
-        if hasattr(request.body, 'read'):
-            if _len_plus(request.body) is None:
-                try:
-                    data = b''
-                    for chunk in iter(lambda: request.body.read(4096), b""):
-                        data += chunk
-                    request.body = data
-                except AttributeError:
-                    raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body'))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
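
When the block is a stream whose length cannot be determined, the helper above
drains it into memory in 4096-byte chunks. The same pattern in isolation:

    from io import BytesIO

    def read_all(stream, chunk_size=4096):
        # Accumulate chunks until read() returns b'' (end of stream).
        data = b''
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            data += chunk
        return data

    assert read_all(BytesIO(b'abc' * 5000)) == b'abc' * 5000
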
-    def _put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None, encryption_data=None):
-        '''
-        See put_block_list for more details. This helper method
-        allows for encryption or other such special behavior because
-        it is safely handled by the library. These behaviors are
-        prohibited in the public version of this function.
-        :param str encryption_data:
-            A JSON formatted string containing the encryption metadata generated for this 
-            blob if it was encrypted all at once upon upload. This should only be passed
-            in by internal methods.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block_list', block_list)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        request.body = _get_request_body(
-            _convert_block_list_to_xml(block_list))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
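
These two helpers back the public put_block/put_block_list pair. A hedged
sketch of the public flow they enable (all names are placeholders; block ids
must share a common length):

    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import BlobBlock

    service = BlockBlobService(account_name='myaccount', account_key='<key>')

    # Stage two blocks, then commit them in order with a block list.
    service.put_block('mycontainer', 'big.bin', b'part-1', block_id='000')
    service.put_block('mycontainer', 'big.bin', b'part-2', block_id='001')
    service.put_block_list('mycontainer', 'big.bin',
                           [BlobBlock(id='000'), BlobBlock(id='001')])
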
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/models.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,764 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Container(object):
-    '''
-    Blob container class. 
-    
-    :ivar str name: 
-        The name of the container.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the container as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list containers operation. If this parameter was specified but the 
-        container has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar ContainerProperties properties:
-        System properties for the container.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or ContainerProperties()
-        self.metadata = metadata
-
-
-class ContainerProperties(object):
-    '''
-    Blob container's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the container was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar LeaseProperties lease:
-        Stores all the lease information for the container.
-    :ivar bool has_immutability_policy:
-        Represents whether the container has an immutability policy.
-    :ivar bool has_legal_hold:
-        Represents whether the container has a legal hold.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.lease = LeaseProperties()
-        self.public_access = None
-        self.has_immutability_policy = None
-        self.has_legal_hold = None
-
-
-class Blob(object):
-    '''
-    Blob class.
-    
-    :ivar str name:
-        Name of blob.
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    :ivar content:
-        Blob content.
-    :vartype content: str or bytes
-    :ivar BlobProperties properties:
-        Stores all the system properties for the blob.
-    :ivar metadata:
-        Name-value pairs associated with the blob as metadata.
-    :ivar bool deleted:
-        Specify whether the blob was soft deleted.
-        In other words, if the blob is being retained by the delete retention policy,
-        this field would be True. The blob could be undeleted or it will be garbage collected after the specified
-        time period.
-    '''
-
-    def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False):
-        self.name = name
-        self.snapshot = snapshot
-        self.content = content
-        self.properties = props or BlobProperties()
-        self.metadata = metadata
-        self.deleted = deleted
-
-
-class BlobProperties(object):
-    '''
-    Blob Properties
-    
-    :ivar str blob_type:
-        String indicating this blob's type.
-    :ivar datetime last_modified:
-        A datetime object representing the last time the blob was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire blob was requested, 
-        the length of blob in bytes. If a subset of the blob was requested, the 
-        length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client 
-        requested a subset of the blob.
-    :ivar int append_blob_committed_block_count:
-        (For Append Blobs) Number of committed blocks in the blob.
-    :ivar int page_blob_sequence_number:
-        (For Page Blobs) Sequence number for page blob used for coordinating
-        concurrent writes.
-    :ivar bool server_encrypted:
-        Set to true if the blob is encrypted on the server.
-    :ivar ~azure.storage.blob.models.CopyProperties copy:
-        Stores all the copy properties for the blob.
-    :ivar ~azure.storage.blob.models.ContentSettings content_settings:
-        Stores all the content settings for the blob.
-    :ivar ~azure.storage.blob.models.LeaseProperties lease:
-        Stores all the lease information for the blob.
-    :ivar StandardBlobTier blob_tier:
-        Indicates the access tier of the blob. The hot tier is optimized
-        for storing data that is accessed frequently. The cool storage tier
-        is optimized for storing data that is infrequently accessed and stored
-        for at least a month. The archive tier is optimized for storing
-        data that is rarely accessed and stored for at least six months
-        with flexible latency requirements.
-    :ivar datetime blob_tier_change_time:
-        Indicates when the access tier was last changed.
-    :ivar bool blob_tier_inferred:
-        Indicates whether the access tier was inferred by the service.
-        If false, it indicates that the tier was set explicitly.
-    :ivar datetime deleted_time:
-        A datetime object representing the time at which the blob was deleted.
-    :ivar int remaining_retention_days:
-        The number of days that the blob will be retained before being permanently deleted by the service.
-    :ivar datetime creation_time:
-        Indicates when the blob was created, in UTC.
-    '''
-
-    def __init__(self):
-        self.blob_type = None
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.append_blob_committed_block_count = None
-        self.page_blob_sequence_number = None
-        self.server_encrypted = None
-        self.copy = CopyProperties()
-        self.content_settings = ContentSettings()
-        self.lease = LeaseProperties()
-        self.blob_tier = None
-        self.blob_tier_change_time = None
-        self.blob_tier_inferred = False
-        self.deleted_time = None
-        self.remaining_retention_days = None
-        self.creation_time = None
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a blob.
-    
-    :ivar str content_type:
-        The content type specified for the blob. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If the content_encoding has previously been set
-        for the blob, that value is stored.
-    :ivar str content_language:
-        If the content_language has previously been set
-        for the blob, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the blob, that value is stored.
-    :ivar str cache_control:
-        If the cache_control has previously been set for
-        the blob, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the blob, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-blob-cache-control': _to_str(self.cache_control),
-            'x-ms-blob-content-type': _to_str(self.content_type),
-            'x-ms-blob-content-disposition': _to_str(self.content_disposition),
-            'x-ms-blob-content-md5': _to_str(self.content_md5),
-            'x-ms-blob-content-encoding': _to_str(self.content_encoding),
-            'x-ms-blob-content-language': _to_str(self.content_language),
-        }
-
-
-class CopyProperties(object):
-    '''
-    Blob Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy Blob operation where this blob
-        was the destination blob. This header does not appear if this blob has never
-        been the destination in a Copy Blob operation, or if this blob has been
-        modified after a concluded Copy Blob operation using Set Blob Properties,
-        Put Blob, or Put Block List.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source blob used in the last attempted
-        Copy Blob operation where this blob was the destination blob. This header does not
-        appear if this blob has never been the destination in a Copy Blob operation, or if
-        this blob has been modified after a concluded Copy Blob operation using
-        Set Blob Properties, Put Blob, or Put Block List.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy Blob.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy Blob operation where this blob was the destination blob. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy Blob operation where this blob was the
-        destination blob. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes the
-        cause of a fatal or non-fatal copy operation failure.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class LeaseProperties(object):
-    '''
-    Blob Lease Properties.
-    
-    :ivar str status:
-        The lease status of the blob.
-        Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the blob.
-        Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a blob is leased, specifies whether the lease is of infinite or fixed duration.
-    '''
-
-    def __init__(self):
-        self.status = None
-        self.state = None
-        self.duration = None
-
-
-class BlobPrefix(object):
-    '''
-    BlobPrefix objects may potentially be returned in the blob list when
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is 
-    used with a delimiter. Prefixes can be thought of as virtual blob directories.
-    
-    :ivar str name: The name of the blob prefix.
-    '''
-
-    def __init__(self):
-        self.name = None
-
-
-class BlobBlockState(object):
-    '''Block blob block types.'''
-
-    Committed = 'Committed'
-    '''Committed blocks.'''
-
-    Latest = 'Latest'
-    '''Latest blocks.'''
-
-    Uncommitted = 'Uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class BlobBlock(object):
-    '''
-    BlockBlob Block class.
-    
-    :ivar str id:
-        Block id.
-    :ivar str state:
-        Block state.
-        Possible values: committed|uncommitted
-    :ivar int size:
-        Block size in bytes.
-    '''
-
-    def __init__(self, id=None, state=BlobBlockState.Latest):
-        self.id = id
-        self.state = state
-
-    def _set_size(self, size):
-        self.size = size
-
-
-class BlobBlockList(object):
-    '''
-    Blob Block List class.
-   
-    :ivar committed_blocks:
-        List of committed blocks.
-    :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    :ivar uncommitted_blocks:
-        List of uncommitted blocks.
-    :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    '''
-
-    def __init__(self):
-        self.committed_blocks = list()
-        self.uncommitted_blocks = list()
-
-
-class PageRange(object):
-    '''
-    Page Range for page blob.
-    
-    :ivar int start:
-        Start of page range in bytes.
-    :ivar int end:
-        End of page range in bytes.
-    :ivar bool is_cleared:
-        Indicates if a page range is cleared or not. Only applicable
-        for get_page_range_diff API.
-    '''
-
-    def __init__(self, start=None, end=None, is_cleared=False):
-        self.start = start
-        self.end = end
-        self.is_cleared = is_cleared
-
-
-class ResourceProperties(object):
-    '''
-    Base response for a resource request.
-    
-    :ivar str etag:
-        Opaque etag value that can be used to check if resource
-        has been modified.
-    :ivar datetime last_modified:
-        Datetime for last time resource was modified.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-
-
-class AppendBlockProperties(ResourceProperties):
-    '''
-    Response for an append block request.
-    
-    :ivar int append_offset:
-        Position to start next append.
-    :ivar int committed_block_count:
-        Number of committed append blocks.
-    '''
-
-    def __init__(self):
-        super(AppendBlockProperties, self).__init__()
-        self.append_offset = None
-        self.committed_block_count = None
-
-
-class PageBlobProperties(ResourceProperties):
-    '''
-    Response for a page request.
-    
-    :ivar int sequence_number:
-        Identifier for page blobs to help handle concurrent writes.
-    '''
-
-    def __init__(self):
-        super(PageBlobProperties, self).__init__()
-        self.sequence_number = None
-
-
-class PublicAccess(object):
-    '''
-    Specifies whether data in the container may be accessed publicly and the level of access.
-    '''
-
-    OFF = 'off'
-    '''
-    Specifies that there is no public read access for either the container or the blobs within it.
-    Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
-    '''
-
-    Blob = 'blob'
-    '''
-    Specifies public read access for blobs. Blob data within this container can be read 
-    via anonymous request, but container data is not available. Clients cannot enumerate 
-    blobs within the container via anonymous request.
-    '''
-
-    Container = 'container'
-    '''
-    Specifies full public read access for container and blob data. Clients can enumerate 
-    blobs within the container via anonymous request, but cannot enumerate containers 
-    within the storage account.
-    '''
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the blob has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the base blob and all of its snapshots.
-    '''
-
-    Only = 'only'
-    '''
-    Delete only the blob's snapshots and not the blob itself.
-    '''
-
-
-class BlockListType(object):
-    '''
-    Specifies whether to return the list of committed blocks, the list of uncommitted 
-    blocks, or both lists together.
-    '''
-
-    All = 'all'
-    '''Both committed and uncommitted blocks.'''
-
-    Committed = 'committed'
-    '''Committed blocks.'''
-
-    Uncommitted = 'uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class SequenceNumberAction(object):
-    '''Sequence number actions.'''
-
-    Increment = 'increment'
-    '''
-    Increments the value of the sequence number by 1. If specifying this option, 
-    do not include the x-ms-blob-sequence-number header.
-    '''
-
-    Max = 'max'
-    '''
-    Sets the sequence number to be the higher of the value included with the 
-    request and the value currently stored for the blob.
-    '''
-
-    Update = 'update'
-    '''Sets the sequence number to the value included with the request.'''
-
-
-class _LeaseActions(object):
-    '''Actions for a lease.'''
-
-    Acquire = 'acquire'
-    '''Acquire the lease.'''
-
-    Break = 'break'
-    '''Break the lease.'''
-
-    Change = 'change'
-    '''Change the lease ID.'''
-
-    Release = 'release'
-    '''Release the lease.'''
-
-    Renew = 'renew'
-    '''Renew the lease.'''
-
-
-class _BlobTypes(object):
-    '''Blob type options.'''
-
-    AppendBlob = 'AppendBlob'
-    '''Append blob type.'''
-
-    BlockBlob = 'BlockBlob'
-    '''Block blob type.'''
-
-    PageBlob = 'PageBlob'
-    '''Page blob type.'''
-
-
-class Include(object):
-    '''
-    Specifies the datasets to include in the blob list response.
-
-    :ivar ~azure.storage.blob.models.Include Include.COPY: 
-        Specifies that metadata related to any current or previous Copy Blob operation 
-        should be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.METADATA: 
-        Specifies that metadata be returned in the response.
-    :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: 
-        Specifies that snapshots should be included in the enumeration.
-    :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: 
-        Specifies that blobs for which blocks have been uploaded, but which have not 
-        been committed using Put Block List, be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.DELETED:
-        Specifies that deleted blobs should be returned in the response.
-    '''
-
-    def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
-                 copy=False, deleted=False, _str=None):
-        '''
-        :param bool snapshots:
-             Specifies that snapshots should be included in the enumeration.
-        :param bool metadata:
-            Specifies that metadata be returned in the response.
-        :param bool uncommitted_blobs:
-            Specifies that blobs for which blocks have been uploaded, but which have 
-            not been committed using Put Block List, be included in the response.
-        :param bool copy: 
-            Specifies that metadata related to any current or previous Copy Blob 
-            operation should be included in the response.
-        :param bool deleted:
-            Specifies that deleted blobs should be returned in the response.
-        :param str _str: 
-            A string representing the includes.
-        '''
-        if not _str:
-            _str = ''
-        components = _str.split(',')
-        self.snapshots = snapshots or ('snapshots' in components)
-        self.metadata = metadata or ('metadata' in components)
-        self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
-        self.copy = copy or ('copy' in components)
-        self.deleted = deleted or ('deleted' in components)
-
-    def __or__(self, other):
-        # Join with a comma so the combined string splits back into
-        # individual components when re-parsed by __init__.
-        return Include(_str=str(self) + ',' + str(other))
-
-    def __add__(self, other):
-        return Include(_str=str(self) + ',' + str(other))
-
-    def __str__(self):
-        include = (('snapshots,' if self.snapshots else '') +
-                   ('metadata,' if self.metadata else '') +
-                   ('uncommittedblobs,' if self.uncommitted_blobs else '') +
-                   ('copy,' if self.copy else '') +
-                   ('deleted,' if self.deleted else ''))
-        return include.rstrip(',')
-
-
-Include.COPY = Include(copy=True)
-Include.METADATA = Include(metadata=True)
-Include.SNAPSHOTS = Include(snapshots=True)
-Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
-Include.DELETED = Include(deleted=True)
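
With the comma separator in __or__/__add__ above, includes compose by string
concatenation and re-parse cleanly; a small illustration:

    combined = Include.METADATA | Include.SNAPSHOTS
    assert combined.metadata and combined.snapshots
    # __str__ emits components in a fixed order, regardless of operand order.
    assert str(combined) == 'snapshots,metadata'
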
-
-
-class BlobPermissions(object):
-    '''
-    BlobPermissions class to be used with 
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
-
-    :ivar BlobPermissions BlobPermissions.ADD:
-        Add a block to an append blob.
-    :ivar BlobPermissions BlobPermissions.CREATE:
-        Write a new blob, snapshot a blob, or copy a blob to a new blob.
-    :ivar BlobPermissions BlobPermissions.DELETE:
-        Delete the blob.
-    :ivar BlobPermissions BlobPermissions.READ:
-        Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
-    :ivar BlobPermissions BlobPermissions.WRITE:
-        Create or write content, properties, metadata, or block list. Snapshot or lease 
-        the blob. Resize the blob (page blob only). Use the blob as the destination of a 
-        copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, add=False, create=False, write=False,
-                 delete=False, _str=None):
-        '''    
-        :param bool read:
-            Read the content, properties, metadata and block list. Use the blob as 
-            the source of a copy operation.
-        :param bool add:
-            Add a block to an append blob.
-        :param bool create:
-            Write a new blob, snapshot a blob, or copy a blob to a new blob.
-        :param bool write: 
-            Create or write content, properties, metadata, or block list. Snapshot 
-            or lease the blob. Resize the blob (page blob only). Use the blob as the 
-            destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the blob.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-BlobPermissions.ADD = BlobPermissions(add=True)
-BlobPermissions.CREATE = BlobPermissions(create=True)
-BlobPermissions.DELETE = BlobPermissions(delete=True)
-BlobPermissions.READ = BlobPermissions(read=True)
-BlobPermissions.WRITE = BlobPermissions(write=True)
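
A hedged sketch of combining these flags for a blob-level SAS (account,
container, and blob names are placeholders):

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import BlobPermissions

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    sas_token = service.generate_blob_shared_access_signature(
        'mycontainer', 'doc.txt',
        permission=BlobPermissions.READ | BlobPermissions.WRITE,  # str() == 'rw'
        expiry=datetime.utcnow() + timedelta(hours=1))
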
-
-
-class ContainerPermissions(object):
-    '''
-    ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
-    API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. 
-
-    :ivar ContainerPermissions ContainerPermissions.DELETE:
-        Delete any blob in the container. Note: You cannot grant permissions to 
-        delete a container with a container SAS. Use an account SAS instead.
-    :ivar ContainerPermissions ContainerPermissions.LIST:
-        List blobs in the container.
-    :ivar ContainerPermissions ContainerPermissions.READ:
-        Read the content, properties, metadata or block list of any blob in the 
-        container. Use any blob in the container as the source of a copy operation.
-    :ivar ContainerPermissions ContainerPermissions.WRITE:
-        For any blob in the container, create or write content, properties, 
-        metadata, or block list. Snapshot or lease the blob. Resize the blob 
-        (page blob only). Use the blob as the destination of a copy operation 
-        within the same account. Note: You cannot grant permissions to read or 
-        write container properties or metadata, nor to lease a container, with 
-        a container SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata or block list of any blob in the 
-            container. Use any blob in the container as the source of a copy operation.
-        :param bool write: 
-            For any blob in the container, create or write content, properties, 
-            metadata, or block list. Snapshot or lease the blob. Resize the blob 
-            (page blob only). Use the blob as the destination of a copy operation 
-            within the same account. Note: You cannot grant permissions to read or 
-            write container properties or metadata, nor to lease a container, with 
-            a container SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any blob in the container. Note: You cannot grant permissions to 
-            delete a container with a container SAS. Use an account SAS instead.
-        :param bool list: 
-            List blobs in the container.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-ContainerPermissions.DELETE = ContainerPermissions(delete=True)
-ContainerPermissions.LIST = ContainerPermissions(list=True)
-ContainerPermissions.READ = ContainerPermissions(read=True)
-ContainerPermissions.WRITE = ContainerPermissions(write=True)
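
Container permissions are also used for stored access policies. A sketch,
assuming the AccessPolicy model from the sibling common package (the policy
identifier and names are placeholders):

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import ContainerPermissions
    from azure.multiapi.storage.v2017_11_09.common.models import AccessPolicy

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    policy = AccessPolicy(
        permission=ContainerPermissions.READ | ContainerPermissions.LIST,
        expiry=datetime.utcnow() + timedelta(days=7))
    service.set_container_acl('mycontainer', {'read-list-policy': policy})
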
-
-
-class PremiumPageBlobTier(object):
-    '''
-    Specifies the page blob tier to set the blob to. This is only applicable to page
-    blobs on premium storage accounts.
-    Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
-    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
-    '''
-
-    P4 = 'P4'
-    ''' P4 Tier '''
-
-    P6 = 'P6'
-    ''' P6 Tier '''
-
-    P10 = 'P10'
-    ''' P10 Tier '''
-
-    P20 = 'P20'
-    ''' P20 Tier '''
-
-    P30 = 'P30'
-    ''' P30 Tier '''
-
-    P40 = 'P40'
-    ''' P40 Tier '''
-
-    P50 = 'P50'
-    ''' P50 Tier '''
-
-    P60 = 'P60'
-    ''' P60 Tier '''
-
-
-class StandardBlobTier(object):
-    '''
-    Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts.
-    '''
-
-    Archive = 'Archive'
-    ''' Archive '''
-
-    Cool = 'Cool'
-    ''' Cool '''
-
-    Hot = 'Hot'
-    ''' Hot '''
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/pageblobservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/pageblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/pageblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/pageblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1392 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_page_ranges,
-    _parse_page_properties,
-    _parse_base_properties,
-)
-from ._encryption import _generate_blob_encryption_data
-from ._error import (
-    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import (
-    _PageBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
-_PAGE_ALIGNMENT = 512
-
-
-class PageBlobService(BaseBlobService):
-    '''
-    Page blobs are a collection of 512-byte pages optimized for random read and
-    write operations. To create a page blob, you initialize the page blob and
-    specify the maximum size the page blob will grow. To add or update the
-    contents of a page blob, you write a page or pages by specifying an offset
-    and a range that align to 512-byte page boundaries. A write to a page blob
-    can overwrite just one page, some pages, or up to 4 MB of the page blob.
-    Writes to page blobs happen in-place and are immediately committed to the
-    blob. The maximum size for a page blob is 8 TB.
-
-    :ivar int MAX_PAGE_SIZE: 
-        The size of the pages put by create_blob_from_* methods. Smaller pages 
-        may be put if there is less data provided. The maximum page size the service 
-        supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped.
-    '''
-
-    MAX_PAGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None,
-                 request_session=None, connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither is 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        self.blob_type = _BlobTypes.PageBlob
-        super(PageBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new Page Blob.
-
-        See create_blob_from_* for high level functions that handle the
-        creation and upload of large blobs with automatic chunking and
-        progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param int content_length:
-            Required. This header specifies the maximum size
-            for the page blob, up to 1 TB. The page blob size must be aligned
-            to a 512-byte boundary.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param int sequence_number:
-            The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the new Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._create_blob(
-            container_name,
-            blob_name,
-            content_length,
-            content_settings=content_settings,
-            sequence_number=sequence_number,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
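
A minimal sketch of creating and writing a page blob with the methods above
(names are placeholders; both the blob size and each page range must respect
the 512-byte alignment rules):

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')

    # The maximum size must be aligned to a 512-byte boundary.
    service.create_blob('mycontainer', 'disk.bin', content_length=512 * 1024)

    # Write the first page: start is a multiple of 512, end is one less.
    service.update_page('mycontainer', 'disk.bin', b'\x00' * 512,
                        start_range=0, end_range=511)
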
-    def incremental_copy_blob(self, container_name, blob_name, copy_source,
-                              metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None,
-                              destination_if_match=None, destination_if_none_match=None, destination_lease_id=None,
-                              source_lease_id=None, timeout=None):
-        '''
-        Begins an asynchronous incremental copy of a blob. This operation returns
-        a copy operation properties object, including a copy ID you can use to
-        check or abort the copy operation. The Blob service copies blobs on a
-        best-effort basis.
-
-        The source blob for an incremental copy operation must be a page blob.
-        Call get_blob_properties on the destination blob to check the status of the copy operation.
-        The final blob will be committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure page blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            The copy source must be a snapshot and include a valid SAS token or be public.
-            Example:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str).
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the destination blob
-            has not been modified since the specified date/time. If the destination blob
-            has been modified, the Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               source_if_modified_since=None, source_if_unmodified_since=None,
-                               source_if_match=None, source_if_none_match=None,
-                               destination_if_modified_since=destination_if_modified_since,
-                               destination_if_unmodified_since=destination_if_unmodified_since,
-                               destination_if_match=destination_if_match,
-                               destination_if_none_match=destination_if_none_match,
-                               destination_lease_id=destination_lease_id,
-                               source_lease_id=source_lease_id, timeout=timeout,
-                               incremental_copy=True)
-
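
Continuing the sketch above, a hedged example of starting an incremental copy
(the snapshot URL below is a placeholder; it must point at a page blob
snapshot and include a SAS token unless the source is public):

    copy_source = ('https://myaccount.blob.core.windows.net/mycontainer/'
                   'src-disk.bin?snapshot=2017-11-09T00:00:00.0000000Z&<sas>')
    copy_props = service.incremental_copy_blob(
        'mycontainer', 'dst-disk.bin', copy_source)
    print(copy_props.status, copy_props.id)
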
-    def update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Updates a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes page:
-            Content of the page.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-1023, etc.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage
-            service checks the hash of the content that has arrived against
-            the hash that was sent. This is primarily valuable for detecting
-            bit flips on the wire when using HTTP instead of HTTPS, as HTTPS
-            (the default) already validates the content. Note that this MD5
-            hash is not stored with the blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._update_page(
-            container_name,
-            blob_name,
-            page,
-            start_range,
-            end_range,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_sequence_number_lte=if_sequence_number_lte,
-            if_sequence_number_lt=if_sequence_number_lt,
-            if_sequence_number_eq=if_sequence_number_eq,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
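
A minimal usage sketch of update_page, illustrating the 512-byte alignment rules above. The import path, credentials, and container/blob names are placeholder assumptions, not part of this diff:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    # Placeholder credentials; substitute real values.
    service = PageBlobService(account_name='myaccount', account_key='<key>')

    page = b'\x00' * 512                  # payload must exactly fill the range
    props = service.update_page(
        'mycontainer', 'myblob', page,
        start_range=0,                    # a multiple of 512
        end_range=511)                    # one less than a multiple of 512
    print(props.etag, props.last_modified)
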
-
-    def clear_page(
-            self, container_name, blob_name, start_range, end_range,
-            lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Clears a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-1023, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'clear',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-
-        return self._perform_request(request, _parse_page_properties)
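
A companion sketch for clearing an aligned range, under the same placeholder assumptions as the update_page example above:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # Clearing removes the range from the blob's valid page list; reads return zeros.
    props = service.clear_page('mycontainer', 'myblob', start_range=0, end_range=511)
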
-
-    def get_page_ranges(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve information
-            from.
-        :param int start_range:
-            Start of byte range to use for getting valid page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting valid page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the start offset
-            up to the end offset.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of valid Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
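
A minimal sketch of enumerating valid page ranges with this method; the import path and names are assumed placeholders, and each returned item is a PageRange with inclusive byte offsets:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    for page_range in service.get_page_ranges('mycontainer', 'myblob'):
        # Offsets are inclusive byte positions within the blob.
        print(page_range.start, page_range.end)
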
-
-    def get_page_ranges_diff(
-            self, container_name, blob_name, previous_snapshot, snapshot=None,
-            start_range=None, end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the page ranges that differ between a previous snapshot and
-        either a more recent snapshot or the current blob, including pages
-        that were cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str previous_snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a previous blob snapshot to be compared
-            against a more recent snapshot or the current blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a more recent blob snapshot to be compared
-            against a previous snapshot (previous_snapshot).
-        :param int start_range:
-            Start of byte range to use for getting different page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting different page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the start offset
-            up to the end offset.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less
-            than a multiple of 512. Examples of valid byte ranges are
-            0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of different Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('previous_snapshot', previous_snapshot)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'prevsnapshot': _to_str(previous_snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
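
A sketch of diffing against a prior snapshot; the snapshot value is left as a placeholder standing in for a value returned by snapshot_blob, and the import path is assumed:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # '<snapshot-datetime>' is a placeholder for a snapshot_blob return value.
    for page_range in service.get_page_ranges_diff(
            'mycontainer', 'myblob', previous_snapshot='<snapshot-datetime>'):
        print(page_range.start, page_range.end, page_range.is_cleared)
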
-
-    def set_sequence_number(
-            self, container_name, blob_name, sequence_number_action, sequence_number=None,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Sets the blob sequence number.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information.
-        :param str sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('sequence_number_action', sequence_number_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-sequence-number-action': _to_str(sequence_number_action),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
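
A sketch of the call, assuming SequenceNumberAction is importable from this version's models module as in the track1 SDK; credentials and names are placeholders:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import SequenceNumberAction

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # Update replaces the number outright; Increment and Max are the other actions.
    props = service.set_sequence_number(
        'mycontainer', 'myblob',
        sequence_number_action=SequenceNumberAction.Update,
        sequence_number='7')
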
-
-    def resize_blob(
-            self, container_name, blob_name, content_length,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Resizes a page blob to the specified size. If the specified value is less
-        than the current size of the blob, then all pages above the specified value
-        are cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int content_length:
-            Size to resize blob to.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
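
A minimal resize sketch under the same placeholder assumptions; the new length must respect the 512-byte page size:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # New size must be a multiple of 512; shrinking clears pages beyond the new end.
    props = service.resize_blob('mycontainer', 'myblob', content_length=512 * 1024)
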
-
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None, max_connections=2,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The
-            storage service checks the hash of the content that has arrived
-            against the hash that was sent. This is primarily valuable for
-            detecting bit flips on the wire when using HTTP instead of HTTPS,
-            as HTTPS (the default) already validates the content. Note that
-            this MD5 hash is not stored with the blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                premium_page_blob_tier=premium_page_blob_tier)
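
A sketch of uploading a local file with the progress callback signature documented above; the file name, credentials, and container/blob names are illustrative assumptions:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    def report(current, total):
        # current = bytes uploaded so far; total = size of the blob.
        print('{0}/{1} bytes uploaded'.format(current, total))

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # For a page blob the file size must be a multiple of 512 bytes (e.g. a VHD).
    service.create_blob_from_path(
        'mycontainer', 'myblob', 'disk.vhd',
        progress_callback=report, max_connections=4)
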
-
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file/stream, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a page
-            blob cannot be created if the count is unknown.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set the blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The
-            storage service checks the hash of the content that has arrived
-            against the hash that was sent. This is primarily valuable for
-            detecting bit flips on the wire when using HTTP instead of HTTPS,
-            as HTTPS (the default) already validates the content. Note that
-            this MD5 hash is not stored with the blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        if count < 0:
-            raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        if count % _PAGE_ALIGNMENT != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
-
-        cek, iv, encryption_data = None, None, None
-        if self.key_encryption_key is not None:
-            cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-        response = self._create_blob(
-            container_name=container_name,
-            blob_name=blob_name,
-            content_length=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            encryption_data=encryption_data
-        )
-
-        if count == 0:
-            return response
-
-        # _upload_blob_chunks returns the block ids for block blobs, so resource_properties
-        # is passed in to capture the last_modified and etag for page and append blobs.
-        # Block blobs do not need this, since _put_block_list is called afterwards and returns it.
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_PAGE_SIZE,
-            stream=stream,
-            max_connections=max_connections,
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_PageBlobChunkUploader,
-            if_match=response.etag,
-            timeout=timeout,
-            content_encryption_key=cek,
-            initialization_vector=iv,
-            resource_properties=resource_properties
-        )
-
-        return resource_properties
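
A stream-based sketch under the same placeholder assumptions; note the required count, which must be a multiple of 512:

    from io import BytesIO

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    data = b'\x00' * 1024                 # count must be a multiple of 512
    service.create_blob_from_stream(
        'mycontainer', 'myblob', BytesIO(data), count=len(data))
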
-
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications. Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the byte array.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The
-            storage service checks the hash of the content that has arrived
-            against the hash that was sent. This is primarily valuable for
-            detecting bit flips on the wire when using HTTP instead of HTTPS,
-            as HTTPS (the default) already validates the content. Note that
-            this MD5 hash is not stored with the blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            premium_page_blob_tier=premium_page_blob_tier)
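
A byte-array sketch showing the index/count slicing described above, with the same placeholder assumptions:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    data = b'\x00' * 2048
    # Upload only the second 512-byte page of the buffer.
    service.create_blob_from_bytes(
        'mycontainer', 'myblob', data, index=512, count=512)
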
-
-    def set_premium_page_blob_tier(
-            self, container_name, blob_name, premium_page_blob_tier,
-            timeout=None):
-        '''
-        Sets the page blob tier on the blob. This API is only supported for page blobs on premium accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('premium_page_blob_tier', premium_page_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(premium_page_blob_tier)
-        }
-
-        self._perform_request(request)
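
A sketch of setting the tier, assuming PremiumPageBlobTier is importable from this version's models module as in the track1 SDK; the P10 value and all names are illustrative:

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService
    from azure.multiapi.storage.v2017_11_09.blob.models import PremiumPageBlobTier

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    # Only meaningful for page blobs on a premium storage account.
    service.set_premium_page_blob_tier('mycontainer', 'myblob', PremiumPageBlobTier.P10)
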
-
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None,
-                  premium_page_blob_tier=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation
-        properties object, including a copy ID you can use to check or abort the
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation must be a page blob. If the destination
-        blob already exists, it must be of the same blob type as the source blob.
-        Any existing destination blob will be overwritten.
-        The destination blob cannot be modified while a copy operation is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob's length, initially containing all zeroes. Then
-        the source page ranges are enumerated, and non-empty ranges are copied.
-
-        The operation will fail if the tier on the source blob is larger than
-        the tier passed to this copy operation, or if the size of the blob
-        exceeds the limit of that tier.
-
-        You can call get_blob_properties on the destination
-        blob to check the status of the copy operation. The final blob will be
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set on the destination blob. The tier correlates to
-            the size of the blob and number of allowed IOPS. This is only applicable to
-            page blobs on premium storage accounts.
-            The operation will fail if the tier on the source blob is larger
-            than the tier passed to this copy operation, or if the size of the
-            blob exceeds the limit of that tier.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata, premium_page_blob_tier,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False)
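
A sketch of starting a copy and polling the destination for completion, as the docstring above suggests via get_blob_properties; the source URL, credentials, and names are placeholder assumptions:

    import time

    from azure.multiapi.storage.v2017_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<key>')
    source_url = 'https://myaccount.blob.core.windows.net/mycontainer/srcblob'
    copy = service.copy_blob('mycontainer', 'dstblob', source_url)

    # The copy runs server-side; poll the destination until it completes.
    while copy.status == 'pending':
        time.sleep(1)
        copy = service.get_blob_properties('mycontainer', 'dstblob').properties.copy
    print(copy.status, copy.id)
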
-
-    # -----Helper methods-----------------------------------------------------
-
-    def _create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            encryption_data=None):
-        '''
-        See create_blob for more details. This helper method
-        allows for encryption or other such special behavior because
-        it is safely handled by the library. These behaviors are
-        prohibited in the public version of this function.
-        :param str encryption_data:
-            The JSON formatted encryption metadata to upload as a part of the blob.
-            This should only be passed internally from other methods, and only
-            when the upload of the entire blob contents immediately follows
-            creation of the blob.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        See update_page for more details. This helper method
-        allows for encryption and other special behaviors that are
-        handled safely inside the library but are prohibited in the
-        public version of this function.
-        '''
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'update',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-        request.body = _get_data_bytes_only('page', page)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_page_properties)
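
The ``_update_page`` helper above implements the transactional integrity check
behind ``validate_content``: it MD5-hashes the request body and sends the
digest as ``Content-MD5`` so the service can reject corrupted pages. A minimal
sketch of that check, assuming a ``bytes`` payload (the helper name below is
illustrative, not part of the removed API)::

    import base64
    import hashlib

    def content_md5(body):
        # base64-encoded MD5 digest of the raw request body, as the
        # service expects in the Content-MD5 header
        return base64.b64encode(hashlib.md5(body).digest()).decode('utf-8')

    headers = {'Content-MD5': content_md5(b'\x00' * 512)}  # one 512-byte page
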
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/blob/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/blob/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,179 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ._constants import X_MS_VERSION
-
-
-class BlobSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating blob and container access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_blob(self, container_name, blob_name, permission=None,
-                      expiry=None, start=None, id=None, ip=None, protocol=None,
-                      cache_control=None, content_disposition=None,
-                      content_encoding=None, content_language=None,
-                      content_type=None):
-        '''
-        Generates a shared access signature for the blob.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = container_name + '/' + blob_name
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('b')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
-
-        return sas.get_token()
-
-    def generate_container(self, container_name, permission=None, expiry=None,
-                           start=None, id=None, ip=None, protocol=None,
-                           cache_control=None, content_disposition=None,
-                           content_encoding=None, content_language=None,
-                           content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_container_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('c')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
-
-        return sas.get_token()
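
The two generators above differ only in the signed resource type ('b' for
blob, 'c' for container) and in the canonicalized path that feeds the
signature. A hedged usage sketch of this removed track1 API, with placeholder
account credentials (the key must be valid base64)::

    from datetime import datetime, timedelta

    sas = BlobSharedAccessSignature('myaccount', 'bXlhY2NvdW50a2V5')  # placeholder key
    token = sas.generate_blob(
        'mycontainer', 'data.bin',
        permission='r',                                 # read-only
        expiry=datetime.utcnow() + timedelta(hours=1),  # naive datetimes are treated as UTC
    )
    # token is a query string of signed parameters, e.g. 'sv=2017-11-09&sr=b&...&sig=...'
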
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    DEFAULT_X_MS_VERSION,
-)
-from .cloudstorageaccount import CloudStorageAccount
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    DeleteRetentionPolicy,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-    ServiceStats,
-    GeoReplication,
-    LocationMode,
-    RetryContext,
-)
-from .retry import (
-    ExponentialRetry,
-    LinearRetry,
-    no_retry,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from .tokencredential import TokenCredential
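
This file defined the track1 public surface of the 2017-11-09 common package;
dropping it removes consumer imports of the form::

    # track1-style imports that no longer resolve after 1.5.0
    from azure.multiapi.storage.v2017_11_09.common import (
        CloudStorageAccount,
        ExponentialRetry,
        TokenCredential,
    )
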
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_auth.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_auth.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_auth.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_auth.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,117 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._common_conversion import (
-    _sign_string,
-)
-from ._constants import (
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME
-)
-
-import logging
-logger = logging.getLogger(__name__)
-
-
-class _StorageSharedKeyAuthentication(object):
-    def __init__(self, account_name, account_key, is_emulated=False):
-        self.account_name = account_name
-        self.account_key = account_key
-        self.is_emulated = is_emulated
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = request.path.split('?')[0]
-
-        # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME
-        # as this is how the emulator works
-        if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1:
-            # only replace the first instance
-            uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
-
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        signature = _sign_string(self.account_key, string_to_sign)
-        auth_string = 'SharedKey ' + self.account_name + ':' + signature
-        request.headers['Authorization'] = auth_string
-
-
-class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication):  # redefines the base above, adding request signing
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        logger.debug("String_to_sign=%s", string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + value
-
-        return string_to_sign
-
-
-class _StorageNoAuthentication(object):
-    def sign_request(self, request):
-        pass
-
-
-class _StorageSASAuthentication(object):
-    def __init__(self, sas_token):
-        # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens
-        # doing so avoids double question marks when signing
-        if sas_token[0] == '?':
-            self.sas_token = sas_token[1:]
-        else:
-            self.sas_token = sas_token
-
-    def sign_request(self, request):
-        # if 'sig=' is present, then the request has already been signed
-        # as is the case when performing retries
-        if 'sig=' in request.path:
-            return
-        if '?' in request.path:
-            request.path += '&'
-        else:
-            request.path += '?'
-
-        request.path += self.sas_token
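
The Shared Key scheme above concatenates the HTTP verb, a fixed list of
standard headers, the sorted ``x-ms-*`` headers, the canonicalized resource,
and the sorted query parameters, then HMAC-signs the result. A minimal sketch
of just the ``x-ms-*`` canonicalization step, assuming a plain dict of request
headers::

    def canonicalized_xms_headers(headers):
        # lower-case, sort, and join x-ms-* headers as 'name:value\n',
        # mirroring _get_canonicalized_headers above
        pairs = sorted((name.lower(), value)
                       for name, value in headers.items()
                       if name.lower().startswith('x-ms-') and value is not None)
        return ''.join('{}:{}\n'.format(name, value) for name, value in pairs)

    canonicalized_xms_headers({'x-ms-version': '2017-11-09',
                               'x-ms-date': 'Mon, 01 Jan 2018 00:00:00 GMT'})
    # -> 'x-ms-date:Mon, 01 Jan 2018 00:00:00 GMT\nx-ms-version:2017-11-09\n'
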
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_common_conversion.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_common_conversion.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_common_conversion.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_common_conversion.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,126 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-import sys
-from io import (SEEK_SET)
-
-from dateutil.tz import tzutc
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
-)
-from .models import (
-    _unicode_type,
-)
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-def _int_to_str(value):
-    return str(int(value)) if value is not None else None
-
-
-def _bool_to_str(value):
-    if value is None:
-        return None
-
-    if isinstance(value, bool):
-        if value:
-            return 'true'
-        else:
-            return 'false'
-
-    return str(value)
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _datetime_to_utc_string(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value is None:
-        return None
-
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-
-    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-
-def _encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def _decode_base64_to_bytes(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def _decode_base64_to_text(data):
-    decoded_bytes = _decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def _sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = _decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, _unicode_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, _unicode_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = _encode_base64(digest)
-    return encoded_digest
-
-
-def _get_content_md5(data):
-    md5 = hashlib.md5()
-    if isinstance(data, bytes):
-        md5.update(data)
-    elif hasattr(data, 'read'):
-        pos = 0
-        try:
-            pos = data.tell()
-        except:
-            pass
-        for chunk in iter(lambda: data.read(4096), b""):
-            md5.update(chunk)
-        try:
-            data.seek(pos, SEEK_SET)
-        except (AttributeError, IOError):
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
-    else:
-        raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
-
-    return base64.b64encode(md5.digest()).decode('utf-8')
-
-
-def _lower(text):
-    return text.lower()
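
``_sign_string`` above is the core of every Shared Key and SAS signature in
this package: the base64-decoded account key is the HMAC-SHA256 key over the
UTF-8 string-to-sign, and the digest is re-encoded as base64. The same
operation in standard-library terms::

    import base64
    import hashlib
    import hmac

    def sign_string(account_key, string_to_sign):
        key = base64.b64decode(account_key)  # key_is_base64=True path
        digest = hmac.new(key, string_to_sign.encode('utf-8'),
                          hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')
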
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_connection.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_connection.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_connection.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_connection.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,160 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from urllib.parse import urlparse
-else:
-    from urlparse import urlparse
-
-from ._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME,
-    DEV_ACCOUNT_KEY,
-    DEV_BLOB_HOST,
-    DEV_QUEUE_HOST,
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-_EMULATOR_ENDPOINTS = {
-    'blob': DEV_BLOB_HOST,
-    'queue': DEV_QUEUE_HOST,
-    'file': '',
-}
-
-_CONNECTION_ENDPOINTS = {
-    'blob': 'BlobEndpoint',
-    'queue': 'QueueEndpoint',
-    'file': 'FileEndpoint',
-}
-
-_CONNECTION_ENDPOINTS_SECONDARY = {
-    'blob': 'BlobSecondaryEndpoint',
-    'queue': 'QueueSecondaryEndpoint',
-    'file': 'FileSecondaryEndpoint',
-}
-
-
-class _ServiceParameters(object):
-    def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 custom_domain=None, custom_domain_secondary=None):
-
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.token_credential = token_credential
-        self.protocol = protocol or DEFAULT_PROTOCOL
-        self.is_emulated = is_emulated
-
-        if is_emulated:
-            self.account_name = DEV_ACCOUNT_NAME
-            self.protocol = 'http'
-
-            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
-            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
-
-            self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME)
-            self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME)
-        else:
-            # Strip whitespace from the key
-            if self.account_key:
-                self.account_key = self.account_key.strip()
-
-            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
-
-            # Setup the primary endpoint
-            if custom_domain:
-                parsed_url = urlparse(custom_domain)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.primary_endpoint = parsed_url.netloc + path
-                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
-            else:
-                if not self.account_name:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
-
-            # Setup the secondary endpoint
-            if custom_domain_secondary:
-                if not custom_domain:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)   
-
-                parsed_url = urlparse(custom_domain_secondary)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.secondary_endpoint = parsed_url.netloc + path
-            else:
-                if self.account_name:
-                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
-                else:
-                    self.secondary_endpoint = None
-
-    @staticmethod
-    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential= None,
-                               is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
-                               request_session=None, connection_string=None, socket_timeout=None):
-        if connection_string:
-            params = _ServiceParameters._from_connection_string(connection_string, service)
-        elif is_emulated:
-            params = _ServiceParameters(service, is_emulated=True)
-        elif account_name:
-            if protocol.lower() != 'https' and token_credential is not None:
-                raise ValueError("Token credential is only supported with HTTPS.")
-            params = _ServiceParameters(service,
-                                        account_name=account_name,
-                                        account_key=account_key,
-                                        sas_token=sas_token,
-                                        token_credential=token_credential,
-                                        is_emulated=is_emulated,
-                                        protocol=protocol,
-                                        endpoint_suffix=endpoint_suffix,
-                                        custom_domain=custom_domain)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        params.request_session = request_session
-        params.socket_timeout = socket_timeout
-        return params
-
-    @staticmethod
-    def _from_connection_string(connection_string, service):
-        # Split into key=value pairs removing empties, then split the pairs into a dict
-        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
-
-        # Authentication
-        account_name = config.get('AccountName')
-        account_key = config.get('AccountKey')
-        sas_token = config.get('SharedAccessSignature')
-
-        # Emulator
-        is_emulated = config.get('UseDevelopmentStorage')
-
-        # Basic URL Configuration
-        protocol = config.get('DefaultEndpointsProtocol')
-        endpoint_suffix = config.get('EndpointSuffix')
-
-        # Custom URLs
-        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
-        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
-
-        return _ServiceParameters(service,
-                                  account_name=account_name,
-                                  account_key=account_key,
-                                  sas_token=sas_token,
-                                  is_emulated=is_emulated,
-                                  protocol=protocol,
-                                  endpoint_suffix=endpoint_suffix,
-                                  custom_domain=endpoint,
-                                  custom_domain_secondary=endpoint_secondary)
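
``_from_connection_string`` depends on the ``Key=Value;`` shape of Azure
connection strings: split on ``;``, then split each pair on the first ``=``
only, so the ``=`` padding in base64 account keys survives. For example, with
placeholder values::

    conn = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
            'AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net')
    config = dict(s.split('=', 1) for s in conn.split(';') if s)
    config['AccountKey']  # 'bXlrZXk=' - padding preserved by maxsplit=1
    '{}.{}.{}'.format(config['AccountName'], 'blob', config['EndpointSuffix'])
    # -> 'myaccount.blob.core.windows.net'
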
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,47 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import platform
-import sys
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '1.2.0rc1'
-
-# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
-# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package
-USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
-USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
-                                                          platform.python_version(), platform.system(),
-                                                          platform.release())
-
-# default values for common package, in case it is used directly
-DEFAULT_X_MS_VERSION = '2017-11-09'
-DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)
-
-# Live ServiceClient URLs
-SERVICE_HOST_BASE = 'core.windows.net'
-DEFAULT_PROTOCOL = 'https'
-
-# Development ServiceClient URLs
-DEV_BLOB_HOST = '127.0.0.1:10000'
-DEV_QUEUE_HOST = '127.0.0.1:10001'
-
-# Default credentials for Development Storage Service
-DEV_ACCOUNT_NAME = 'devstoreaccount1'
-DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary'
-DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
-
-# Socket timeout in seconds
-DEFAULT_SOCKET_TIMEOUT = 20
-
-# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
-# The socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
-    # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
-    DEFAULT_SOCKET_TIMEOUT = (20, 2000)
-
-# Encryption constants
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
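
The tuple form of ``DEFAULT_SOCKET_TIMEOUT`` follows the ``(connect, read)``
convention of ``requests``, which the track1 transport is built on; the
2000-second read budget corresponds to a 100MB block at the assumed 50KB/s
floor. Illustratively::

    import requests

    # (connect, read): fail fast on connect, allow long transfers to finish
    requests.get('https://myaccount.blob.core.windows.net/?comp=list',
                 timeout=(20, 2000))
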
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,364 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-from ._common_conversion import _to_str
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    ServiceProperties,
-    Logging,
-    Metrics,
-    CorsRule,
-    AccessPolicy,
-    _dict,
-    GeoReplication,
-    ServiceStats,
-    DeleteRetentionPolicy,
-)
-
-
-def _to_int(value):
-    return value if value is None else int(value)
-
-
-def _bool(value):
-    return value.lower() == 'true'
-
-
-def _to_upper_str(value):
-    return _to_str(value).upper() if value is not None else None
-
-
-def _get_download_size(start_range, end_range, resource_size):
-    if start_range is not None:
-        end_range = end_range if end_range else (resource_size if resource_size else None)
-        if end_range is not None:
-            return end_range - start_range
-        else:
-            return None
-    else:
-        return resource_size
-
-
-GET_PROPERTIES_ATTRIBUTE_MAP = {
-    'last-modified': (None, 'last_modified', parser.parse),
-    'etag': (None, 'etag', _to_str),
-    'x-ms-blob-type': (None, 'blob_type', _to_str),
-    'content-length': (None, 'content_length', _to_int),
-    'content-range': (None, 'content_range', _to_str),
-    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int),
-    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int),
-    'x-ms-blob-public-access': (None, 'public_access', _to_str),
-    'x-ms-access-tier': (None, 'blob_tier', _to_str),
-    'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
-    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
-    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
-    'x-ms-share-quota': (None, 'quota', _to_int),
-    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
-    'x-ms-creation-time': (None, 'creation_time', parser.parse),
-    'content-type': ('content_settings', 'content_type', _to_str),
-    'cache-control': ('content_settings', 'cache_control', _to_str),
-    'content-encoding': ('content_settings', 'content_encoding', _to_str),
-    'content-disposition': ('content_settings', 'content_disposition', _to_str),
-    'content-language': ('content_settings', 'content_language', _to_str),
-    'content-md5': ('content_settings', 'content_md5', _to_str),
-    'x-ms-lease-status': ('lease', 'status', _to_str),
-    'x-ms-lease-state': ('lease', 'state', _to_str),
-    'x-ms-lease-duration': ('lease', 'duration', _to_str),
-    'x-ms-copy-id': ('copy', 'id', _to_str),
-    'x-ms-copy-source': ('copy', 'source', _to_str),
-    'x-ms-copy-status': ('copy', 'status', _to_str),
-    'x-ms-copy-progress': ('copy', 'progress', _to_str),
-    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
-    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
-    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
-    'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool),
-    'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool),
-}
-
-
-def _parse_metadata(response):
-    '''
-    Extracts out resource metadata information.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    metadata = _dict()
-    for key, value in response.headers.items():
-        if key.lower().startswith('x-ms-meta-'):
-            metadata[key[10:]] = _to_str(value)
-
-    return metadata
-
-
-def _parse_properties(response, result_class):
-    '''
-    Extracts out resource properties and metadata information.
-    Ignores the standard http headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    props = result_class()
-    for key, value in response.headers.items():
-        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
-        if info:
-            if info[0] is None:
-                setattr(props, info[1], info[2](value))
-            else:
-                attr = getattr(props, info[0])
-                setattr(attr, info[1], info[2](value))
-
-    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
-        props.blob_tier = _to_upper_str(props.blob_tier)
-    return props
-
-
-def _parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def _convert_xml_to_signed_identifiers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <SignedIdentifiers>
-      <SignedIdentifier>
-        <Id>unique-value</Id>
-        <AccessPolicy>
-          <Start>start-time</Start>
-          <Expiry>expiry-time</Expiry>
-          <Permission>abbreviated-permission-list</Permission>
-        </AccessPolicy>
-      </SignedIdentifier>
-    </SignedIdentifiers>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    list_element = ETree.fromstring(response.body)
-    signed_identifiers = _dict()
-
-    for signed_identifier_element in list_element.findall('SignedIdentifier'):
-        # Id element
-        id = signed_identifier_element.find('Id').text
-
-        # Access policy element
-        access_policy = AccessPolicy()
-        access_policy_element = signed_identifier_element.find('AccessPolicy')
-        if access_policy_element is not None:
-            start_element = access_policy_element.find('Start')
-            if start_element is not None:
-                access_policy.start = parser.parse(start_element.text)
-
-            expiry_element = access_policy_element.find('Expiry')
-            if expiry_element is not None:
-                access_policy.expiry = parser.parse(expiry_element.text)
-
-            access_policy.permission = access_policy_element.findtext('Permission')
-
-        signed_identifiers[id] = access_policy
-
-    return signed_identifiers
-
-
-def _convert_xml_to_service_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceStats>
-      <GeoReplication>      
-          <Status>live|bootstrap|unavailable</Status>
-          <LastSyncTime>sync-time|<empty></LastSyncTime>
-      </GeoReplication>
-    </StorageServiceStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_stats_element = ETree.fromstring(response.body)
-
-    geo_replication_element = service_stats_element.find('GeoReplication')
-
-    geo_replication = GeoReplication()
-    geo_replication.status = geo_replication_element.find('Status').text
-    last_sync_time = geo_replication_element.find('LastSyncTime').text
-    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None
-
-    service_stats = ServiceStats()
-    service_stats.geo_replication = geo_replication
-    return service_stats
-
-
-def _convert_xml_to_service_properties(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-        <DeleteRetentionPolicy>
-             <Enabled>true|false</Enabled>
-             <Days>number-of-days</Days>
-        </DeleteRetentionPolicy>
-    </StorageServiceProperties>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_properties_element = ETree.fromstring(response.body)
-    service_properties = ServiceProperties()
-
-    # Logging
-    logging = service_properties_element.find('Logging')
-    if logging is not None:
-        service_properties.logging = Logging()
-        service_properties.logging.version = logging.find('Version').text
-        service_properties.logging.delete = _bool(logging.find('Delete').text)
-        service_properties.logging.read = _bool(logging.find('Read').text)
-        service_properties.logging.write = _bool(logging.find('Write').text)
-
-        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
-                                         service_properties.logging.retention_policy)
-    # HourMetrics
-    hour_metrics_element = service_properties_element.find('HourMetrics')
-    if hour_metrics_element is not None:
-        service_properties.hour_metrics = Metrics()
-        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
-
-    # MinuteMetrics
-    minute_metrics_element = service_properties_element.find('MinuteMetrics')
-    if minute_metrics_element is not None:
-        service_properties.minute_metrics = Metrics()
-        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
-
-    # CORS
-    cors = service_properties_element.find('Cors')
-    if cors is not None:
-        service_properties.cors = list()
-        for rule in cors.findall('CorsRule'):
-            allowed_origins = rule.find('AllowedOrigins').text.split(',')
-
-            allowed_methods = rule.find('AllowedMethods').text.split(',')
-
-            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
-
-            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
-
-            exposed_headers = rule.find('ExposedHeaders').text
-            if exposed_headers is not None:
-                cors_rule.exposed_headers = exposed_headers.split(',')
-
-            allowed_headers = rule.find('AllowedHeaders').text
-            if allowed_headers is not None:
-                cors_rule.allowed_headers = allowed_headers.split(',')
-
-            service_properties.cors.append(cors_rule)
-
-    # Target version
-    target_version = service_properties_element.find('DefaultServiceVersion')
-    if target_version is not None:
-        service_properties.target_version = target_version.text
-
-    # DeleteRetentionPolicy
-    delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
-    if delete_retention_policy_element is not None:
-        service_properties.delete_retention_policy = DeleteRetentionPolicy()
-        policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
-        service_properties.delete_retention_policy.enabled = policy_enabled
-
-        if policy_enabled:
-            service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)
-
-    return service_properties
-
-
-def _convert_xml_to_metrics(xml, metrics):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    metrics.version = xml.find('Version').text
-
-    # Enabled
-    metrics.enabled = _bool(xml.find('Enabled').text)
-
-    # IncludeAPIs
-    include_apis_element = xml.find('IncludeAPIs')
-    if include_apis_element is not None:
-        metrics.include_apis = _bool(include_apis_element.text)
-
-    # RetentionPolicy
-    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
-
-
-def _convert_xml_to_retention_policy(xml, retention_policy):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    retention_policy.enabled = _bool(xml.find('Enabled').text)
-
-    # Days
-    days_element = xml.find('Days')
-    if days_element is not None:
-        retention_policy.days = int(days_element.text)
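
Of the helpers above, ``_parse_length_from_content_range`` is the easiest to
sanity-check in isolation; it peels the total resource size off a
``Content-Range`` header::

    content_range = 'bytes 1-3/65537'
    total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
    # 'bytes 1-3/65537' -> '1-3/65537' -> '65537' -> 65537
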
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,233 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-
-from ._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-    __version__,
-)
-from ._error import (
-    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
-    _validate_not_none,
-    _validate_encryption_protocol_version,
-    _validate_key_encryption_key_unwrap,
-    _validate_kek_id,
-)
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier, 
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-    
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    except KeyError:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function used that, given a key_id, will return a key_encryption_key. Please refer 
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_unwrap(key_encryption_key)
-    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())
-
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
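
The envelope pattern above never stores the content-encryption key (cek) in
the clear: the kek wraps the cek, and only the wrapped bytes, IV, and
algorithm identifiers travel with the data. A minimal sketch of the symmetric
half, using the same ``cryptography`` primitives this module imports (random
key material, for illustration only)::

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC

    cek = os.urandom(32)  # 256-bit content-encryption key (AES_CBC_256)
    iv = os.urandom(16)   # one AES block, stored as ContentEncryptionIV

    cipher = Cipher(AES(cek), CBC(iv), backend=default_backend())
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(b'sixteen byte blk') + encryptor.finalize()
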
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_error.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,179 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from sys import version_info
-
-if version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-from azure.common import (
-    AzureHttpError,
-    AzureConflictHttpError,
-    AzureMissingResourceHttpError,
-    AzureException,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-)
-
-_ERROR_CONFLICT = 'Conflict ({0})'
-_ERROR_NOT_FOUND = 'Not found ({0})'
-_ERROR_UNKNOWN = 'Unknown error ({0})'
-_ERROR_STORAGE_MISSING_INFO = \
-    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
-_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
-    'The emulator does not support the file service.'
-_ERROR_ACCESS_POLICY = \
-    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
-    'instance'
-_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
-_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
-_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
-_ERROR_VALUE_NONE = '{0} should not be None.'
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
-_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use the {0} chunk downloader, more than one thread must be ' + \
-    'used, since get_{0}_to_bytes should be called for single-threaded ' + \
-    '{0} downloads.'
-_ERROR_START_END_NEEDED_FOR_MD5 = \
-    'Both end_range and start_range need to be specified ' + \
-    'for getting content MD5.'
-_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
-    'Getting content MD5 for a range greater than 4MB ' + \
-    'is not supported.'
-_ERROR_MD5_MISMATCH = \
-    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
-_ERROR_TOO_MANY_ACCESS_POLICIES = \
-    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
-    'Encryption version is not supported.'
-_ERROR_DECRYPTION_FAILURE = \
-    'Decryption failed'
-_ERROR_ENCRYPTION_REQUIRED = \
-    'Encryption required but no key was provided.'
-_ERROR_DECRYPTION_REQUIRED = \
-    'Decryption required but neither key nor resolver was provided.' + \
-    ' If you do not want to decrypt, please do not set the require encryption flag.'
-_ERROR_INVALID_KID = \
-    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
-_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
-    'Specified encryption algorithm is not supported.'
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
-                                           ' for this method.'
-_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
-_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
-                            'Data was either not encrypted or metadata has been lost.'
-
-
-def _dont_fail_on_exist(error):
-    ''' Don't throw an exception if the resource exists.
-    This is called by create_* APIs with fail_on_exist=False.'''
-    if isinstance(error, AzureConflictHttpError):
-        return False
-    else:
-        raise error
-
-
-def _dont_fail_not_exist(error):
-    ''' Don't throw an exception if the resource doesn't exist.
-    This is called by delete_* APIs with fail_not_exist=False.'''
-    if isinstance(error, AzureMissingResourceHttpError):
-        return False
-    else:
-        raise error
-
-
-def _http_error_handler(http_error):
-    ''' Simple error handler for azure.'''
-    message = str(http_error)
-    if 'x-ms-error-code' in http_error.respheader:
-        message += ' ErrorCode: ' + http_error.respheader['x-ms-error-code']
-    if http_error.respbody is not None:
-        message += '\n' + http_error.respbody.decode('utf-8-sig')
-    raise AzureHttpError(message, http_error.status)
-
-
-def _validate_type_bytes(param_name, param):
-    if not isinstance(param, bytes):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _validate_type_bytes_or_stream(param_name, param):
-    if not (isinstance(param, bytes) or hasattr(param, 'read')):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
-
-
-def _validate_content_match(server_md5, computed_md5):
-    if server_md5 != computed_md5:
-        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
-
-
-def _validate_access_policies(identifiers):
-    if identifiers and len(identifiers) > 5:
-        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-def _validate_key_encryption_key_unwrap(kek):
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-
-
-def _validate_encryption_required(require_encryption, kek):
-    if require_encryption and (kek is None):
-        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
-
-
-def _validate_decryption_required(require_encryption, kek, resolver):
-    if (require_encryption and (kek is None) and
-            (resolver is None)):
-        raise ValueError(_ERROR_DECRYPTION_REQUIRED)
-
-
-def _validate_encryption_protocol_version(encryption_protocol):
-    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-
-
-def _validate_kek_id(kid, resolved_id):
-    if not (kid == resolved_id):
-        raise ValueError(_ERROR_INVALID_KID)
-
-
-def _validate_encryption_unsupported(require_encryption, key_encryption_key):
-    if require_encryption or (key_encryption_key is not None):
-        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
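
The _dont_fail_on_exist/_dont_fail_not_exist handlers above back the track1 convention where create_* and delete_* methods accept a fail-on-conflict flag and return a boolean instead of raising. A hedged sketch of that calling pattern (try_create is illustrative, not an SDK helper; the import path assumes the 1.4.x package):

from azure.common import AzureHttpError
from azure.multiapi.storage.v2017_11_09.common._error import _dont_fail_on_exist

def try_create(create_fn):
    # Mirrors fail_on_exist=False: a 409 Conflict becomes a False return,
    # while _dont_fail_on_exist re-raises every other HTTP error.
    try:
        create_fn()
        return True
    except AzureHttpError as error:
        return _dont_fail_on_exist(error)
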
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_http/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_http/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_http/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_http/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,74 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-
-class HTTPError(Exception):
-    '''
-    Represents an HTTP Exception when response status code >= 300.
-
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar list respheader:
-        the returned headers, as a list of (name, value) pairs
-    :ivar bytes respbody:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, respheader, respbody):
-        self.status = status
-        self.respheader = respheader
-        self.respbody = respbody
-        Exception.__init__(self, message)
-
-
-class HTTPResponse(object):
-    '''
-    Represents a response from an HTTP request.
-    
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar dict headers:
-        the returned headers
-    :ivar bytes body:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, headers, body):
-        self.status = status
-        self.message = message
-        self.headers = headers
-        self.body = body
-
-
-class HTTPRequest(object):
-    '''
-    Represents an HTTP Request.
-
-    :ivar str host:
-        the host name to connect to
-    :ivar str method:
-        the method to use to connect (string such as GET, POST, PUT, etc.)
-    :ivar str path:
-        the uri fragment
-    :ivar dict query:
-        query parameters
-    :ivar dict headers:
-        header values
-    :ivar bytes body:
-        the body of the request.
-    '''
-
-    def __init__(self):
-        self.host = ''
-        self.method = ''
-        self.path = ''
-        self.query = {}  # dict of query parameter name to value
-        self.headers = {}  # dict of header name to header value
-        self.body = ''
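
These three classes are the whole track1 HTTP abstraction: a request is a plain mutable record that higher layers fill in before handing it to the HTTP client. A minimal sketch of populating one (the endpoint values are placeholders, and the import path assumes the 1.4.x package):

from azure.multiapi.storage.v2017_11_09.common._http import HTTPRequest

request = HTTPRequest()
request.method = 'GET'
request.host = 'myaccount.blob.core.windows.net'   # placeholder account
request.path = '/mycontainer'
request.query = {'restype': 'container', 'comp': 'list'}
request.headers = {'x-ms-version': '2017-11-09'}
request.body = b''
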
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_http/httpclient.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_http/httpclient.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_http/httpclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_http/httpclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-from . import HTTPResponse
-from .._serialization import _get_data_bytes_or_stream_only
-logger = logging.getLogger(__name__)
-
-
-class _HTTPClient(object):
-    '''
-    Takes the request and sends it to cloud service and returns the response.
-    '''
-
-    def __init__(self, protocol=None, session=None, timeout=None):
-        '''
-        :param str protocol:
-            http or https.
-        :param requests.Session session:
-            session object created with requests library (or compatible).
-        :param int timeout:
-            timeout for the http request, in seconds.
-        '''
-        self.protocol = protocol
-        self.session = session
-        self.timeout = timeout
-
-        # By default, requests adds an Accept:*/* and Accept-Encoding to the session,
-        # which causes issues with some Azure REST APIs. Removing these here gives us
-        # the flexibility to add them back on a case-by-case basis.
-        if 'Accept' in self.session.headers:
-            del self.session.headers['Accept']
-
-        if 'Accept-Encoding' in self.session.headers:
-            del self.session.headers['Accept-Encoding']
-
-        self.proxies = None
-
-    def set_proxy(self, host, port, user, password):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        Note that we set the proxies directly on the request later on rather than
-        using the session object, as requests has a bug where the session proxy is
-        ignored in favor of the environment proxy. So, proxy auth will not work
-        unless the proxies are passed directly when making the request, which
-        overrides both.
-
-        :param str host:
-            Address of the proxy. Ex: '192.168.0.100'
-        :param int port:
-            Port of the proxy. Ex: 6000
-        :param str user:
-            User for proxy authorization.
-        :param str password:
-            Password for proxy authorization.
-        '''
-        if user and password:
-            proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
-        else:
-            proxy_string = '{}:{}'.format(host, port)
-
-        self.proxies = {'http': 'http://{}'.format(proxy_string),
-                        'https': 'https://{}'.format(proxy_string)}
-
-    def perform_request(self, request):
-        '''
-        Sends an HTTPRequest to Azure Storage and returns an HTTPResponse.
-        
-        :param HTTPRequest request:
-            The request to serialize and send.
-        :return: An HTTPResponse containing the parsed HTTP response.
-        :rtype: :class:`~azure.storage.common._http.HTTPResponse`
-        '''
-        # Verify the body is either bytes or a file-like/stream object
-        if request.body:
-            request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-
-        # Construct the URI
-        uri = self.protocol.lower() + '://' + request.host + request.path
-
-        # Send the request
-        response = self.session.request(request.method,
-                                        uri,
-                                        params=request.query,
-                                        headers=request.headers,
-                                        data=request.body or None,
-                                        timeout=self.timeout,
-                                        proxies=self.proxies)
-
-        # Parse the response
-        status = int(response.status_code)
-        response_headers = {}
-        for key, name in response.headers.items():
-            # Preserve the case of metadata
-            if key.lower().startswith('x-ms-meta-'):
-                response_headers[key] = name
-            else:
-                response_headers[key.lower()] = name
-
-        wrap = HTTPResponse(status, response.reason, response_headers, response.content)
-        response.close()
-
-        return wrap
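
set_proxy builds an ordinary requests-style proxies mapping, embedding credentials in the URL when both user and password are given; perform_request then passes it per-request to work around the session-proxy bug noted above. For example (addresses and credentials are placeholders; the import path assumes the 1.4.x package):

import requests
from azure.multiapi.storage.v2017_11_09.common._http.httpclient import _HTTPClient

client = _HTTPClient(protocol='https', session=requests.Session(), timeout=30)
client.set_proxy('192.168.0.100', 6000, 'user', 'secret')
# client.proxies is now:
# {'http': 'http://user:secret@192.168.0.100:6000',
#  'https': 'https://user:secret@192.168.0.100:6000'}
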
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,352 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-import uuid
-from datetime import date
-from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
-from os import fstat
-from time import time
-from wsgiref.handlers import format_date_time
-
-from dateutil.tz import tzutc
-
-if sys.version_info >= (3,):
-    from urllib.parse import quote as url_quote
-else:
-    from urllib2 import quote as url_quote
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES,
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-)
-from .models import (
-    _unicode_type,
-)
-from ._common_conversion import (
-    _str,
-)
-
-
-def _to_utc_datetime(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _update_request(request, x_ms_version, user_agent_string):
-    # Verify body
-    if request.body:
-        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-        length = _len_plus(request.body)
-
-        # The only scenario where length is None is when the stream object is not seekable.
-        if length is None:
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)
-
-        # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
-        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
-            request.headers['Content-Length'] = str(length)
-
-    # Append additional headers based on the service.
-    request.headers['x-ms-version'] = x_ms_version
-    request.headers['User-Agent'] = user_agent_string
-    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())
-
-    # If the host has a path component (ex local storage), move it
-    path = request.host.split('/', 1)
-    if len(path) == 2:
-        request.host = path[0]
-        request.path = '/{}{}'.format(path[1], request.path)
-
-    # Encode and optionally add local storage prefix to path
-    request.path = url_quote(request.path, '/()$=\',~')
-
-
-def _add_metadata_headers(metadata, request):
-    if metadata:
-        if not request.headers:
-            request.headers = {}
-        for name, value in metadata.items():
-            request.headers['x-ms-meta-' + name] = value
-
-
-def _add_date_header(request):
-    current_time = format_date_time(time())
-    request.headers['x-ms-date'] = current_time
-
-
-def _get_data_bytes_only(param_name, param_value):
-    '''Validates the request body passed in and converts it to bytes
-    if our policy allows it.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _get_data_bytes_or_stream_only(param_name, param_value):
-    '''Validates the request body passed in is a stream/file-like or bytes
-    object.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _get_request_body(request_body):
-    '''Converts an object into a request body. If it's None, we return an
-    empty byte string. Bytes and file-like streams are passed through
-    unchanged; text is encoded as UTF-8.'''
-    if request_body is None:
-        return b''
-
-    if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
-        return request_body
-
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    request_body = str(request_body)
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    return request_body
-
-
-def _convert_signed_identifiers_to_xml(signed_identifiers):
-    if signed_identifiers is None:
-        return ''
-
-    sis = ETree.Element('SignedIdentifiers')
-    for id, access_policy in signed_identifiers.items():
-        # Root signed identifiers element
-        si = ETree.SubElement(sis, 'SignedIdentifier')
-
-        # Id element
-        ETree.SubElement(si, 'Id').text = id
-
-        # Access policy element
-        policy = ETree.SubElement(si, 'AccessPolicy')
-
-        if access_policy.start:
-            start = access_policy.start
-            if isinstance(access_policy.start, date):
-                start = _to_utc_datetime(start)
-            ETree.SubElement(policy, 'Start').text = start
-
-        if access_policy.expiry:
-            expiry = access_policy.expiry
-            if isinstance(access_policy.expiry, date):
-                expiry = _to_utc_datetime(expiry)
-            ETree.SubElement(policy, 'Expiry').text = expiry
-
-        if access_policy.permission:
-            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    except:
-        raise
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
-                                       cors, target_version=None, delete_retention_policy=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-        <DeleteRetentionPolicy>
-             <Enabled>true|false</Enabled>
-             <Days>number-of-days</Days>
-        </DeleteRetentionPolicy>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.Element('StorageServiceProperties')
-
-    # Logging
-    if logging:
-        logging_element = ETree.SubElement(service_properties_element, 'Logging')
-        ETree.SubElement(logging_element, 'Version').text = logging.version
-        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
-        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
-        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
-
-        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
-        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)
-
-    # HourMetrics
-    if hour_metrics:
-        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
-        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)
-
-    # MinuteMetrics
-    if minute_metrics:
-        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
-        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)
-
-    # CORS
-    # Make sure to still serialize empty list
-    if cors is not None:
-        cors_element = ETree.SubElement(service_properties_element, 'Cors')
-        for rule in cors:
-            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
-            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
-            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
-            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
-            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
-            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
-
-    # Target version
-    if target_version:
-        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
-
-    # DeleteRetentionPolicy
-    if delete_retention_policy:
-        policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
-        ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
-
-        if delete_retention_policy.enabled:
-            ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
-                                                            method='xml')
-    except:
-        raise
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_metrics_to_xml(metrics, root):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    ETree.SubElement(root, 'Version').text = metrics.version
-
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
-
-    # IncludeAPIs
-    if metrics.enabled and metrics.include_apis is not None:
-        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
-
-    # RetentionPolicy
-    retention_element = ETree.SubElement(root, 'RetentionPolicy')
-    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
-
-
-def _convert_retention_policy_to_xml(retention_policy, root):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
-
-    # Days
-    if retention_policy.enabled and retention_policy.days:
-        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
-
-
-def _len_plus(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            return fstat(fileno).st_size
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
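
_len_plus tries three strategies in order: len() for anything with __len__, fstat on a real file descriptor, and finally seek/tell arithmetic for seekable streams, so it can size bytes, files, and in-memory buffers alike. A small illustration of the three paths (import path assumes the 1.4.x package):

import io
import tempfile
from azure.multiapi.storage.v2017_11_09.common._serialization import _len_plus

assert _len_plus(b'abc') == 3              # __len__ path

buf = io.BytesIO(b'abcdef')
buf.seek(2)
assert _len_plus(buf) == 4                 # seek/tell path: bytes remaining

with tempfile.TemporaryFile() as f:
    f.write(b'12345')
    f.flush()                              # ensure fstat sees the bytes
    assert _len_plus(f) == 5               # fstat path: whole file size
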
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/cloudstorageaccount.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/cloudstorageaccount.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/cloudstorageaccount.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/cloudstorageaccount.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,188 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-# Note that we import BlobService/QueueService/FileService on demand
-# because this module is imported by azure/storage/__init__
-# ie. we don't want 'import azure.storage' to trigger an automatic import
-# of blob/queue/file packages.
-
-from ._error import _validate_not_none
-from .models import (
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-
-
-class CloudStorageAccount(object):
-    """
-    Provides a factory for creating the blob, queue, and file services
-    with a common account name and account key or sas token. Users can either
-    use the factory or construct the appropriate service directly.
-    """
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless is_emulated is used.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.is_emulated = is_emulated
-
-    def create_block_blob_service(self):
-        '''
-        Creates a BlockBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
-        '''
-        try:
-            from ..blob.blockblobservice import BlockBlobService
-            return BlockBlobService(self.account_name, self.account_key,
-                                    sas_token=self.sas_token,
-                                    is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_page_blob_service(self):
-        '''
-        Creates a PageBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
-        '''
-        try:
-            from ..blob.pageblobservice import PageBlobService
-            return PageBlobService(self.account_name, self.account_key,
-                                   sas_token=self.sas_token,
-                                   is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_append_blob_service(self):
-        '''
-        Creates an AppendBlobService object with the settings specified in the
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
-        '''
-        try:
-            from ..blob.appendblobservice import AppendBlobService
-            return AppendBlobService(self.account_name, self.account_key,
-                                     sas_token=self.sas_token,
-                                     is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_queue_service(self):
-        '''
-        Creates a QueueService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
-        '''
-        try:
-            from ..queue.queueservice import QueueService
-            return QueueService(self.account_name, self.account_key,
-                                sas_token=self.sas_token,
-                                is_emulated=self.is_emulated)
-        except ImportError:
-            raise Exception('The package azure-storage-queue is required. '
-                            + 'Please install it using "pip install azure-storage-queue"')
-
-    def create_file_service(self):
-        '''
-        Creates a FileService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.file.fileservice.FileService`
-        '''
-        try:
-            from ..file.fileservice import FileService
-            return FileService(self.account_name, self.account_key,
-                               sas_token=self.sas_token)
-        except ImportError:
-            raise Exception('The package azure-storage-file is required. '
-                            + 'Please install it using "pip install azure-storage-file"')
-
-    def generate_shared_access_signature(self, services, resource_types,
-                                         permission, expiry, start=None,
-                                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(services, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
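
For context, a typical track1 use of the factory removed above looked roughly like this (a hedged sketch: account values are placeholders, the import path assumes the 1.4.x package, and AccountPermissions.READ is the read flag defined later in models.py):

from datetime import datetime, timedelta
from azure.multiapi.storage.v2017_11_09.common import (
    CloudStorageAccount, Services, ResourceTypes, AccountPermissions,
)

account = CloudStorageAccount(account_name='myaccount', account_key='<base64-key>')
blob_service = account.create_block_blob_service()

sas_token = account.generate_shared_access_signature(
    services=Services.BLOB,
    resource_types=ResourceTypes.OBJECT,
    permission=AccountPermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1),
)
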
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/models.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,649 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info < (3,):
-    from collections import Iterable
-
-    _unicode_type = unicode
-else:
-    from collections.abc import Iterable
-
-    _unicode_type = str
-
-from ._error import (
-    _validate_not_none
-)
-
-
-class _HeaderDict(dict):
-    def __getitem__(self, index):
-        return super(_HeaderDict, self).__getitem__(index.lower())
-
-
-class _list(list):
-    '''Used so that additional properties can be set on the return list'''
-    pass
-
-
-class _dict(dict):
-    '''Used so that additional properties can be set on the return dictionary'''
-    pass
-
-
-class _OperationContext(object):
-    '''
-    Contains information that lasts the lifetime of an operation. This operation 
-    may span multiple calls to the Azure service.
-
-    :ivar bool location_lock: 
-        Whether the location should be locked for this operation.
-    :ivar str host_location:
-        The host location to lock to.
-    '''
-
-    def __init__(self, location_lock=False):
-        self.location_lock = location_lock
-        self.host_location = None
-
-
-class ListGenerator(Iterable):
-    '''
-    A generator object used to list storage resources. The generator will lazily 
-    follow the continuation tokens returned by the service and stop when all 
-    resources have been returned or max_results is reached.
-
-    If max_results is specified and the account has more than that number of 
-    resources, the generator will have a populated next_marker field once it 
-    finishes. This marker can be used to create a new generator if more 
-    results are desired.
-    '''
-
-    def __init__(self, resources, list_method, list_args, list_kwargs):
-        self.items = resources
-        self.next_marker = resources.next_marker
-
-        self._list_method = list_method
-        self._list_args = list_args
-        self._list_kwargs = list_kwargs
-
-    def __iter__(self):
-        # return results
-        for i in self.items:
-            yield i
-
-        while True:
-            # if no more results on the service, return
-            if not self.next_marker:
-                break
-
-            # update the marker args
-            self._list_kwargs['marker'] = self.next_marker
-
-            # handle max results, if present
-            max_results = self._list_kwargs.get('max_results')
-            if max_results is not None:
-                max_results = max_results - len(self.items)
-
-                # if we've reached max_results, return
-                # else, update the max_results arg
-                if max_results <= 0:
-                    break
-                else:
-                    self._list_kwargs['max_results'] = max_results
-
-            # get the next segment
-            resources = self._list_method(*self._list_args, **self._list_kwargs)
-            self.items = resources
-            self.next_marker = resources.next_marker
-
-            # return results
-            for i in self.items:
-                yield i
-
-
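# A sketch of the consumption pattern this enables (names are placeholders):
#
#     blobs = blob_service.list_blobs('mycontainer', num_results=100)
#     for blob in blobs:            # lazily follows continuation tokens
#         print(blob.name)
#     # blobs.next_marker can seed a fresh generator if the listing was capped.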
-class RetryContext(object):
-    '''
-    Contains the request and response information that can be used to determine 
-    whether and how to retry. This context is stored across retries and may be 
-    used to store other information relevant to the retry strategy.
-
-    :ivar ~azure.storage.common._http.HTTPRequest request:
-        The request sent to the storage service.
-    :ivar ~azure.storage.common._http.HTTPResponse response:
-        The response returned by the storage service.
-    :ivar LocationMode location_mode:
-        The location the request was sent to.
-    :ivar Exception exception:
-        The exception that just occurred. The type could either be AzureException (for HTTP errors),
-        or other Exception types from lower layers, which are kept unwrapped for easier processing.
-    :ivar bool is_emulated:
-        Whether retry is targeting the emulator. The default value is False.
-    :ivar int body_position:
-        The initial position of the body stream. It is useful when retries happen and we need to rewind the stream.
-    '''
-
-    def __init__(self):
-        self.request = None
-        self.response = None
-        self.location_mode = None
-        self.exception = None
-        self.is_emulated = False
-        self.body_position = None
-
-
-class LocationMode(object):
-    '''
-    Specifies the location the request should be sent to. This mode only applies 
-    for RA-GRS accounts which allow secondary read access. All other account types 
-    must use PRIMARY.
-    '''
-
-    PRIMARY = 'primary'
-    ''' Requests should be sent to the primary location. '''
-
-    SECONDARY = 'secondary'
-    ''' Requests should be sent to the secondary location, if possible. '''
-
-
-class RetentionPolicy(object):
-    '''
-    By default, Storage Analytics will not delete any logging or metrics data. Blobs
-    will continue to be written until the shared 20TB limit is
-    reached. Once the 20TB limit is reached, Storage Analytics will stop writing 
-    new data and will not resume until free space is available. This 20TB limit 
-    is independent of the total limit for your storage account.
-
-    There are two ways to delete Storage Analytics data: by manually making deletion 
-    requests or by setting a data retention policy. Manual requests to delete Storage 
-    Analytics data are billable, but delete requests resulting from a retention policy 
-    are not billable.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled: 
-            Indicates whether a retention policy is enabled for the 
-            storage service. If disabled, logging and metrics data will be retained 
-            infinitely by the service unless explicitly deleted.
-        :param int days: 
-            Required if enabled is true. Indicates the number of 
-            days that metrics or logging data should be retained. All data older 
-            than this value will be deleted. The minimum value you can specify is 1; 
-            the largest value is 365 (one year).
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class Logging(object):
-    '''
-    Storage Analytics logs detailed information about successful and failed requests 
-    to a storage service. This information can be used to monitor individual requests 
-    and to diagnose issues with a storage service. Requests are logged on a best-effort 
-    basis.
-
-    All logs are stored in block blobs in a container named $logs, which is
-    automatically created when Storage Analytics is enabled for a storage account. 
-    The $logs container is located in the blob namespace of the storage account. 
-    This container cannot be deleted once Storage Analytics has been enabled, though 
-    its contents can be deleted.
-
-    For more information, see  https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
-    '''
-
-    def __init__(self, delete=False, read=False, write=False,
-                 retention_policy=None):
-        '''
-        :param bool delete: 
-            Indicates whether all delete requests should be logged.
-        :param bool read: 
-            Indicates whether all read requests should be logged.
-        :param bool write: 
-            Indicates whether all write requests should be logged.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("read", read)
-        _validate_not_none("write", write)
-        _validate_not_none("delete", delete)
-
-        self.version = u'1.0'
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class Metrics(object):
-    '''
-    Metrics include aggregated transaction statistics and capacity data about requests 
-    to a storage service. Transactions are reported at both the API operation level 
-    as well as at the storage service level, and capacity is reported at the storage 
-    service level. Metrics data can be used to analyze storage service usage, diagnose 
-    issues with requests made against the storage service, and to improve the 
-    performance of applications that use a service.
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
-    '''
-
-    def __init__(self, enabled=False, include_apis=None,
-                 retention_policy=None):
-        '''
-        :param bool enabled: 
-            Indicates whether metrics are enabled for 
-            the service.
-        :param bool include_apis: 
-            Required if enabled is True. Indicates whether metrics 
-            should generate summary statistics for called API operations.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("include_apis", include_apis)
-
-        self.version = u'1.0'
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class CorsRule(object):
-    '''
-    CORS is an HTTP feature that enables a web application running under one domain 
-    to access resources in another domain. Web browsers implement a security 
-    restriction known as same-origin policy that prevents a web page from calling 
-    APIs in a different domain; CORS provides a secure way to allow one domain 
-    (the origin domain) to call APIs in another domain. 
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
-                 exposed_headers=None, allowed_headers=None):
-        '''
-        :param allowed_origins: 
-            A list of origin domains that will be allowed via CORS, or "*" to allow 
-            all domains. The list must contain at least one entry. Limited to 64
-            origin domains. Each allowed origin can have up to 256 characters.
-        :type allowed_origins: list(str)
-        :param allowed_methods:
-            A list of HTTP methods that are allowed to be executed by the origin. 
-            The list must contain at least one entry. For Azure Storage,
-            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-        :type allowed_methods: list(str)
-        :param int max_age_in_seconds:
-            The number of seconds that the client/browser should cache a 
-            preflight response.
-        :param exposed_headers:
-            Defaults to an empty list. A list of response headers to expose to CORS 
-            clients. Limited to 64 defined headers and two prefixed headers. Each 
-            header can be up to 256 characters.
-        :type exposed_headers: list(str)
-        :param allowed_headers:
-            Defaults to an empty list. A list of headers allowed to be part of 
-            the cross-origin request. Limited to 64 defined headers and 2 prefixed 
-            headers. Each header can be up to 256 characters.
-        :type allowed_headers: list(str)
-        '''
-        _validate_not_none("allowed_origins", allowed_origins)
-        _validate_not_none("allowed_methods", allowed_methods)
-        _validate_not_none("max_age_in_seconds", max_age_in_seconds)
-
-        self.allowed_origins = allowed_origins if allowed_origins else list()
-        self.allowed_methods = allowed_methods if allowed_methods else list()
-        self.max_age_in_seconds = max_age_in_seconds
-        self.exposed_headers = exposed_headers if exposed_headers else list()
-        self.allowed_headers = allowed_headers if allowed_headers else list()
-
-
-class DeleteRetentionPolicy(object):
-    '''
-    To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later.
-    This class groups the settings related to delete retention policy.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled:
-            Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by a delete operation.
-        :param int days:
-            Required only if Enabled is true. Indicates the number of days that a deleted blob should be retained.
-            All data older than this value will be permanently deleted.
-            The minimum value you can specify is 1; the largest value is 365.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class ServiceProperties(object):
-    ''' 
-    Returned by get_*_service_properties functions. Contains the properties of a 
-    storage service, including Analytics and CORS rules.
-
-    Azure Storage Analytics performs logging and provides metrics data for a storage 
-    account. You can use this data to trace requests, analyze usage trends, and 
-    diagnose issues with your storage account. To use Storage Analytics, you must 
-    enable it individually for each service you want to monitor.
-
-    The aggregated data is stored in a well-known blob (for logging) and in well-known 
-    tables (for metrics), which may be accessed using the Blob service and Table 
-    service APIs.
-
-    For an in-depth guide on using Storage Analytics and other tools to identify, 
-    diagnose, and troubleshoot Azure Storage-related issues, see 
-    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
-
-    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    pass
-
-
-class ServiceStats(object):
-    ''' 
-    Returned by get_*_service_stats functions. Contains statistics related to 
-    replication for the given service. It is only available when read-access 
-    geo-redundant replication is enabled for the storage account.
-
-    :ivar GeoReplication geo_replication:
-        An object containing statistics related to replication for the given service.
-    '''
-    pass
-
-
-class GeoReplication(object):
-    ''' 
-    Contains statistics related to replication for the given service.
-
-    :ivar str status:
-        The status of the secondary location. Possible values are:
-            live: Indicates that the secondary location is active and operational.
-            bootstrap: Indicates initial synchronization from the primary location 
-                to the secondary location is in progress. This typically occurs 
-                when replication is first enabled.
-            unavailable: Indicates that the secondary location is temporarily 
-                unavailable.
-    :ivar date last_sync_time:
-        A GMT date value, to the second. All primary writes preceding this value 
-        are guaranteed to be available for read operations at the secondary. 
-        Primary writes after this point in time may or may not be available for 
-        reads. The value may be empty if LastSyncTime is not available. This can 
-        happen if the replication status is bootstrap or unavailable. Although 
-        geo-replication is continuously enabled, the LastSyncTime result may 
-        reflect a cached value from the service that is refreshed every few minutes.
-    '''
-    pass
-
-
-class AccessPolicy(object):
-    '''
-    Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and 
-    permissions for the Shared Access Signatures with which it's associated. 
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit 
-    them from the URL for the Shared Access Signature. Doing so permits you to 
-    modify the associated signature's behavior at any time, as well as to revoke 
-    it. Or you can specify one or more of the access policy parameters within 
-    the stored access policy, and the others on the URL. Finally, you can 
-    specify all of the parameters on the URL. In this case, you can use the 
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must 
-    include all fields required to authenticate the signature. If any required 
-    fields are missing, the request will fail. Likewise, if a field is specified 
-    both in the Shared Access Signature URL and in the stored access policy, the 
-    request will fail with status code 400 (Bad Request).
-    '''
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        '''
-        :param str permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        '''
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class Protocol(object):
-    '''
-    Specifies the protocol permitted for a SAS token. Note that HTTP only is 
-    not allowed.
-    '''
-
-    HTTPS = 'https'
-    ''' Allow HTTPS requests only. '''
-
-    HTTPS_HTTP = 'https,http'
-    ''' Allow HTTP and HTTPS requests. '''
-
-
-class ResourceTypes(object):
-    '''
-    Specifies the resource types that are accessible with the account SAS.
-
-    :ivar ResourceTypes ResourceTypes.CONTAINER:
-        Access to container-level APIs (e.g., Create/Delete Container, 
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories) 
-    :ivar ResourceTypes ResourceTypes.OBJECT:
-        Access to object-level APIs for blobs, queue messages, and files 
-        (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
-    :ivar ResourceTypes ResourceTypes.SERVICE:
-        Access to service-level APIs (e.g., Get/Set Service Properties, 
-        Get Service Stats, List Containers/Queues/Shares)
-    '''
-
-    def __init__(self, service=False, container=False, object=False, _str=None):
-        '''
-        :param bool service:
-            Access to service-level APIs (e.g., Get/Set Service Properties, 
-            Get Service Stats, List Containers/Queues/Shares)
-        :param bool container:
-            Access to container-level APIs (e.g., Create/Delete Container, 
-            Create/Delete Queue, Create/Delete Share,
-            List Blobs/Files and Directories) 
-        :param bool object:
-            Access to object-level APIs for blobs, queue messages, and files 
-            (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
-        :param str _str: 
-            A string representing the resource types.
-        '''
-        if not _str:
-            _str = ''
-        self.service = service or ('s' in _str)
-        self.container = container or ('c' in _str)
-        self.object = object or ('o' in _str)
-
-    def __or__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-
-ResourceTypes.SERVICE = ResourceTypes(service=True)
-ResourceTypes.CONTAINER = ResourceTypes(container=True)
-ResourceTypes.OBJECT = ResourceTypes(object=True)
-
-
-class Services(object):
-    '''
-    Specifies the services accessible with the account SAS.
-
-    :ivar Services Services.BLOB: The blob service.
-    :ivar Services Services.FILE: The file service.
-    :ivar Services Services.QUEUE: The queue service.
-    :ivar Services Services.TABLE: The table service.
-    '''
-
-    def __init__(self, blob=False, queue=False, file=False, table=False, _str=None):
-        '''
-        :param bool blob:
-            Access to the blob service, for example, the `.BlockBlobService`.
-        :param bool queue:
-            Access to the `.QueueService`.
-        :param bool file:
-            Access to the `.FileService`.
-        :param bool table:
-            Access to the `TableService`.
-        :param str _str: 
-            A string representing the services.
-        '''
-        if not _str:
-            _str = ''
-        self.blob = blob or ('b' in _str)
-        self.queue = queue or ('q' in _str)
-        self.file = file or ('f' in _str)
-        self.table = table or ('t' in _str)
-
-    def __or__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('t' if self.table else '') +
-                ('f' if self.file else ''))
-
-
-Services.BLOB = Services(blob=True)
-Services.QUEUE = Services(queue=True)
-Services.TABLE = Services(table=True)
-Services.FILE = Services(file=True)
-
-
-class AccountPermissions(object):
-    '''
-    :class:`~AccountPermissions` class to be used with the 
-    generate_shared_access_signature method and for the AccessPolicies used with 
-    set_*_acl. There are two types of SAS which may be used to grant resource 
-    access. One is to grant access to a specific resource (resource-specific). 
-    Another is to grant access to the entire service for a specific account and 
-    allow certain operations based on the permissions found here.
-
-    :ivar AccountPermissions AccountPermissions.ADD:
-        Valid for the following Object resource types only: queue messages and append blobs.
-    :ivar AccountPermissions AccountPermissions.CREATE:
-        Valid for the following Object resource types only: blobs and files. Users 
-        can create new blobs or files, but may not overwrite existing blobs or files. 
-    :ivar AccountPermissions AccountPermissions.DELETE:
-        Valid for Container and Object resource types, except for queue messages. 
-    :ivar AccountPermissions AccountPermissions.LIST:
-        Valid for Service and Container resource types only. 
-    :ivar AccountPermissions AccountPermissions.PROCESS:
-        Valid for the following Object resource type only: queue messages. 
-    :ivar AccountPermissions AccountPermissions.READ:
-        Valid for all signed resource types (Service, Container, and Object). 
-        Permits read permissions to the specified resource type. 
-    :ivar AccountPermissions AccountPermissions.UPDATE:
-        Valid for the following Object resource types only: queue messages.
-    :ivar AccountPermissions AccountPermissions.WRITE:
-        Valid for all signed resource types (Service, Container, and Object). 
-        Permits write permissions to the specified resource type. 
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 add=False, create=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Valid for all signed resource types (Service, Container, and Object). 
-            Permits read permissions to the specified resource type.
-        :param bool write:
-            Valid for all signed resource types (Service, Container, and Object). 
-            Permits write permissions to the specified resource type.
-        :param bool delete: 
-            Valid for Container and Object resource types, except for queue messages.
-        :param bool list:
-            Valid for Service and Container resource types only.
-        :param bool add:
-            Valid for the following Object resource types only: queue messages and append blobs.
-        :param bool create:
-            Valid for the following Object resource types only: blobs and files. 
-            Users can create new blobs or files, but may not overwrite existing 
-            blobs or files.
-        :param bool update:
-            Valid for the following Object resource types only: queue messages.
-        :param bool process:
-            Valid for the following Object resource type only: queue messages.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-AccountPermissions.READ = AccountPermissions(read=True)
-AccountPermissions.WRITE = AccountPermissions(write=True)
-AccountPermissions.DELETE = AccountPermissions(delete=True)
-AccountPermissions.LIST = AccountPermissions(list=True)
-AccountPermissions.ADD = AccountPermissions(add=True)
-AccountPermissions.CREATE = AccountPermissions(create=True)
-AccountPermissions.UPDATE = AccountPermissions(update=True)
-AccountPermissions.PROCESS = AccountPermissions(process=True)
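
Together, these flag classes serialize to the short permission strings the service expects; ``|``/``+`` composition and the ``_str`` round-trip behave as sketched below, using only the classes defined above::

    perms = AccountPermissions.READ | AccountPermissions.LIST  # str(perms) == 'rl'
    types = ResourceTypes.SERVICE | ResourceTypes.OBJECT       # str(types) == 'so'
    svcs = Services.BLOB | Services.FILE                       # str(svcs) == 'bf'

    # Parsing the serialized form reproduces the same flags.
    parsed = AccountPermissions(_str='rl')
    assert parsed.read and parsed.list and str(parsed) == 'rl'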
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/retry.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/retry.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/retry.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/retry.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,306 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from abc import ABCMeta
-from math import pow
-import random
-from io import (SEEK_SET, UnsupportedOperation)
-
-from .models import LocationMode
-from ._constants import (
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME
-)
-
-
-class _Retry(object):
-    '''
-    The base class for Exponential and Linear retries containing shared code.
-    '''
-    __metaclass__ = ABCMeta
-
-    def __init__(self, max_attempts, retry_to_secondary):
-        '''
-        Constructs a base retry object.
-
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        '''
-        self.max_attempts = max_attempts
-        self.retry_to_secondary = retry_to_secondary
-
-    def _should_retry(self, context):
-        '''
-        A function which determines whether or not to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            A boolean indicating whether or not to retry the request.
-        :rtype: bool
-        '''
-        # If max attempts are reached, do not retry.
-        if context.count >= self.max_attempts:
-            return False
-
-        status = None
-        if context.response and context.response.status:
-            status = context.response.status
-
-        if status is None:
-            '''
-            If status is None, retry as this request triggered an exception. For 
-            example, network issues would trigger this.
-            '''
-            return True
-        elif 200 <= status < 300:
-            '''
-            This method is called after a successful response, meaning we failed 
-            during the response body download or parsing. So, success codes should 
-            be retried.
-            '''
-            return True
-        elif 300 <= status < 500:
-            '''
-            An exception occurred, but in most cases it was expected. Examples could 
-            include a 409 Conflict or 412 Precondition Failed.
-            '''
-            if status == 404 and context.location_mode == LocationMode.SECONDARY:
-                # Response code 404 should be retried if secondary was used.
-                return True
-            if status == 408:
-                # Response code 408 is a timeout and should be retried.
-                return True
-            return False
-        elif status >= 500:
-            '''
-            Response codes of 500 and above, with the exception of 501 Not Implemented 
-            and 505 Version Not Supported, indicate a server issue and should be retried.
-            '''
-            if status == 501 or status == 505:
-                return False
-            return True
-        else:
-            # If something else happened, it's unexpected. Retry.
-            return True
-
-    def _set_next_host_location(self, context):
-        '''
-        A function which sets the next host location on the request, if applicable. 
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context containing the previous host location and the request 
-            to evaluate and possibly modify.
-        '''
-        if len(context.request.host_locations) > 1:
-            # If there's more than one possible location, retry to the alternative
-            if context.location_mode == LocationMode.PRIMARY:
-                context.location_mode = LocationMode.SECONDARY
-
-                # if targeting the emulator (with path style), change path instead of host
-                if context.is_emulated:
-                    # replace the first instance of primary account name with the secondary account name
-                    context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1)
-                else:
-                    context.request.host = context.request.host_locations.get(context.location_mode)
-            else:
-                context.location_mode = LocationMode.PRIMARY
-
-                # if targeting the emulator (with path style), change path instead of host
-                if context.is_emulated:
-                    # replace the first instance of secondary account name with the primary account name
-                    context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
-                else:
-                    context.request.host = context.request.host_locations.get(context.location_mode)
-
-    def _retry(self, context, backoff):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :param function() backoff:
-            A function which returns the backoff time if a retry is to be performed.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        # If the context does not contain a count parameter, this request has not 
-        # been retried yet. Add the count parameter to track the number of retries.
-        if not hasattr(context, 'count'):
-            context.count = 0
-
-        # Determine whether to retry, and if so increment the count, modify the 
-        # request as desired, and return the backoff.
-        if self._should_retry(context):
-            backoff_interval = backoff(context)
-            context.count += 1
-
-            # If retry to secondary is enabled, attempt to change the host if the 
-            # request allows it
-            if self.retry_to_secondary:
-                self._set_next_host_location(context)
-
-            # rewind the request body if it is a stream
-            if hasattr(context.request.body, 'read'):
-                # if no position was saved, then retry would not work
-                if context.body_position is None:
-                    return None
-                else:
-                    try:
-                        # attempt to rewind the body to the initial position
-                        context.request.body.seek(context.body_position, SEEK_SET)
-                    except UnsupportedOperation:
-                        # if body is not seekable, then retry would not work
-                        return None
-
-            return backoff_interval
-
-        return None
-
-
-class ExponentialRetry(_Retry):
-    '''
-    Exponential retry.
-    '''
-
-    def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3,
-                 retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for 
-        the first retry. Subsequent retries are attempted after initial_backoff + 
-        increment_base^retry_count seconds. For example, by default the first retry 
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the 
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff: 
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the 
-            first retry.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(_Retry):
-    '''
-    Linear retry.
-    '''
-
-    def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs a Linear retry object.
-
-        :param int backoff: 
-            The backoff interval, in seconds, between retries.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data 
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
-        '''
-        self.backoff = backoff
-        self.max_attempts = max_attempts
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
-        self.random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(self.random_range_start, self.random_range_end)
-
-
-def no_retry(context):
-    '''
-    Specifies never to retry.
-
-    :param ~azure.storage.models.RetryContext context: 
-        The retry context.
-    :return: 
-        Always returns None to indicate never to retry.
-    :rtype: None
-    '''
-    return None
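
These policies plug into a track1 service object through its ``retry`` attribute; a sketch, assuming ``BlockBlobService`` from the removed blob package::

    # BlockBlobService is assumed from the removed track1 blob package.
    service = BlockBlobService(account_name='myaccount', account_key='...')

    # Exponential: ~15s, ~18s (15+3^1), ~24s (15+3^2) between attempts, +/-3s jitter.
    service.retry = ExponentialRetry(max_attempts=3).retry

    # Linear: a flat ~15s (+/-3s) between each of up to 5 attempts; RA-GRS
    # accounts may additionally fail reads over to the secondary endpoint.
    service.retry = LinearRetry(backoff=15, max_attempts=5,
                                retry_to_secondary=True).retry

    # Or disable retries altogether.
    service.retry = no_retry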
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,217 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from datetime import date
-
-from ._common_conversion import (
-    _sign_string,
-    _to_str,
-)
-from ._constants import DEFAULT_X_MS_VERSION
-from ._serialization import (
-    url_quote,
-    _to_utc_datetime,
-)
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account shared access
-    signature tokens with an account name and account key. Users can either
-    use the factory or construct the appropriate service and use the 
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        client or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _to_str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(_QueryStringConstants.SIGNED_START, start)
-        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, id):
-        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_resource_signature(self, account_name, account_key, service, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        if service == 'blob' or service == 'file':
-            string_to_sign += \
-                (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-                 get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
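
End to end, the factory produces an account SAS query string; a sketch using the model classes removed above (the account key is a placeholder, and the exact field order/version in the comment is indicative only)::

    from datetime import datetime, timedelta

    sas = SharedAccessSignature('myaccount', 'base64-account-key')
    token = sas.generate_account(
        services=Services(blob=True),
        resource_types=ResourceTypes(container=True, object=True),
        permission=AccountPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
        protocol='https')
    # token is a query string of the form
    # 'se=...&sp=rl&spr=https&sv=2017-11-09&ss=b&srt=co&sig=...'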
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/storageclient.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/storageclient.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/storageclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/storageclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,379 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-from abc import ABCMeta
-import logging
-
-logger = logging.getLogger(__name__)
-from time import sleep
-
-import requests
-from azure.common import (
-    AzureException,
-)
-
-from ._constants import (
-    DEFAULT_SOCKET_TIMEOUT,
-    DEFAULT_X_MS_VERSION,
-    DEFAULT_USER_AGENT_STRING,
-    USER_AGENT_STRING_PREFIX,
-    USER_AGENT_STRING_SUFFIX,
-)
-from ._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _http_error_handler,
-)
-from ._http import HTTPError
-from ._http.httpclient import _HTTPClient
-from ._serialization import (
-    _update_request,
-    _add_date_header,
-)
-from .models import (
-    RetryContext,
-    LocationMode,
-    _OperationContext,
-)
-from .retry import ExponentialRetry
-from io import UnsupportedOperation
-
-
-class StorageClient(object):
-    '''
-    This is the base class for service objects. Service objects are used to 
-    perform all requests to Storage. This class cannot be instantiated directly.
-
-    :ivar str account_name:
-        The storage account name. This is used to authenticate requests 
-        signed with an account key and to construct the storage endpoint. It 
-        is required unless a connection string is given, or if a custom 
-        domain is used with anonymous authentication.
-    :ivar str account_key:
-        The storage account key. This is used for shared key authentication. 
-        If neither account key nor sas token is specified, anonymous access 
-        will be used.
-    :ivar str sas_token:
-        A shared access signature token to use to authenticate requests 
-        instead of the account key. If account key and sas token are both 
-        specified, account key will be used to sign. If neither are 
-        specified, anonymous access will be used.
-    :ivar str primary_endpoint:
-        The endpoint to send storage requests to.
-    :ivar str secondary_endpoint:
-        The secondary endpoint to read storage data from. This will only be a 
-        valid endpoint if the storage account used is RA-GRS and thus allows 
-        reading from secondary.
-    :ivar function(context) retry:
-        A function which determines whether to retry. Takes as a parameter a 
-        :class:`~azure.storage.common.models.RetryContext` object. Returns the number
-        of seconds to wait before retrying the request, or None to indicate not 
-        to retry.
-    :ivar ~azure.storage.common.models.LocationMode location_mode:
-        The host location to use to make requests. Defaults to LocationMode.PRIMARY.
-        Note that this setting only applies to RA-GRS accounts as other account 
-        types do not allow reading from secondary. If the location_mode is set to 
-        LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. 
-        Write requests will continue to be sent to primary.
-    :ivar str protocol:
-        The protocol to use for requests. Defaults to https.
-    :ivar requests.Session request_session:
-        The session object to use for http requests.
-    :ivar function(request) request_callback:
-        A function called immediately before each request is sent. This function 
-        takes as a parameter the request object and returns nothing. It may be 
-        used to add custom headers or log request data.
-    :ivar function() response_callback:
-        A function called immediately after each response is received. This 
-        function takes as a parameter the response object and returns nothing. 
-        It may be used to log response data.
-    :ivar function() retry_callback:
-        A function called immediately after retry evaluation is performed. This 
-        function takes as a parameter the retry context object and returns nothing. 
-        It may be used to detect retries and log context information.
-    '''
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, connection_params):
-        '''
-        :param obj connection_params: The parameters to use to construct the client.
-        '''
-        self.account_name = connection_params.account_name
-        self.account_key = connection_params.account_key
-        self.sas_token = connection_params.sas_token
-        self.token_credential = connection_params.token_credential
-        self.is_emulated = connection_params.is_emulated
-
-        self.primary_endpoint = connection_params.primary_endpoint
-        self.secondary_endpoint = connection_params.secondary_endpoint
-
-        protocol = connection_params.protocol
-        request_session = connection_params.request_session or requests.Session()
-        socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
-        self._httpclient = _HTTPClient(
-            protocol=protocol,
-            session=request_session,
-            timeout=socket_timeout,
-        )
-
-        self.retry = ExponentialRetry().retry
-        self.location_mode = LocationMode.PRIMARY
-
-        self.request_callback = None
-        self.response_callback = None
-        self.retry_callback = None
-        self._X_MS_VERSION = DEFAULT_X_MS_VERSION
-        self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING
-
-    def _update_user_agent_string(self, service_package_version):
-        self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX,
-                                                   service_package_version,
-                                                   USER_AGENT_STRING_SUFFIX)
-
-    @property
-    def socket_timeout(self):
-        return self._httpclient.timeout
-
-    @socket_timeout.setter
-    def socket_timeout(self, value):
-        self._httpclient.timeout = value
-
-    @property
-    def protocol(self):
-        return self._httpclient.protocol
-
-    @protocol.setter
-    def protocol(self, value):
-        self._httpclient.protocol = value
-
-    @property
-    def request_session(self):
-        return self._httpclient.session
-
-    @request_session.setter
-    def request_session(self, value):
-        self._httpclient.session = value
-
-    def set_proxy(self, host, port, user=None, password=None):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        :param str host: Address of the proxy. Ex: '192.168.0.100'
-        :param int port: Port of the proxy. Ex: 6000
-        :param str user: User for proxy authorization.
-        :param str password: Password for proxy authorization.
-        '''
-        self._httpclient.set_proxy(host, port, user, password)
-
-    def _get_host_locations(self, primary=True, secondary=False):
-        locations = {}
-        if primary:
-            locations[LocationMode.PRIMARY] = self.primary_endpoint
-        if secondary:
-            locations[LocationMode.SECONDARY] = self.secondary_endpoint
-        return locations
-
-    def _apply_host(self, request, operation_context, retry_context):
-        if operation_context.location_lock and operation_context.host_location:
-            # If this is a location locked operation and the location is set, 
-            # override the request location and host_location.
-            request.host_locations = operation_context.host_location
-            request.host = list(operation_context.host_location.values())[0]
-            retry_context.location_mode = list(operation_context.host_location.keys())[0]
-        elif len(request.host_locations) == 1:
-            # If only one location is allowed, use that location.
-            request.host = list(request.host_locations.values())[0]
-            retry_context.location_mode = list(request.host_locations.keys())[0]
-        else:
-            # If multiple locations are possible, choose based on the location mode.
-            request.host = request.host_locations.get(self.location_mode)
-            retry_context.location_mode = self.location_mode
-
-    @staticmethod
-    def extract_date_and_request_id(retry_context):
-        if getattr(retry_context, 'response', None) is None:
-            return ""
-        resp = retry_context.response
-
-        if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
-                              resp.headers['date'], resp.headers['x-ms-request-id'])
-        elif 'date' in resp.headers:
-            return str.format("Server-Timestamp={0}", resp.headers['date'])
-        elif 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
-        else:
-            return ""
-
-    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
-        '''
-        Sends the request and returns the response. Catches HTTPError and hands it
-        to the error handler.
-        '''
-        operation_context = operation_context or _OperationContext()
-        retry_context = RetryContext()
-        retry_context.is_emulated = self.is_emulated
-
-        # if request body is a stream, we need to remember its current position in case retries happen
-        if hasattr(request.body, 'read'):
-            try:
-                retry_context.body_position = request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-
-        # Apply the appropriate host based on the location mode
-        self._apply_host(request, operation_context, retry_context)
-
-        # Apply common settings to the request
-        _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
-        client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
-
-        while True:
-            try:
-                try:
-                    # Execute the request callback 
-                    if self.request_callback:
-                        self.request_callback(request)
-
-                    # Add date and auth after the callback so date doesn't get too old and 
-                    # authentication is still correct if signed headers are added in the request 
-                    # callback. This also ensures retry policies with long back offs 
-                    # will work as it resets the time sensitive headers.
-                    _add_date_header(request)
-
-                    try:
-                        # request can be signed individually
-                        self.authentication.sign_request(request)
-                    except AttributeError:
-                        # session can also be signed
-                        self.request_session = self.authentication.signed_session(self.request_session)
-
-                    # Set the request context
-                    retry_context.request = request
-
-                    # Log the request before it goes out
-                    logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                request.method,
-                                request.path,
-                                request.query,
-                                str(request.headers).replace('\n', ''))
-
-                    # Perform the request
-                    response = self._httpclient.perform_request(request)
-
-                    # Execute the response callback
-                    if self.response_callback:
-                        self.response_callback(response)
-
-                    # Set the response context
-                    retry_context.response = response
-
-                    # Log the response when it comes back
-                    logger.info("%s Receiving Response: "
-                                "%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                self.extract_date_and_request_id(retry_context),
-                                response.status,
-                                response.message,
-                                str(response.headers).replace('\n', ''))
-
-                    # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
-                    if response.status >= 300:
-                        # This exception will be caught by the general error handler
-                        # and raised as an azure http exception
-                        _http_error_handler(
-                            HTTPError(response.status, response.message, response.headers, response.body))
-
-                    # Parse the response
-                    if parser:
-                        if parser_args:
-                            args = [response]
-                            args.extend(parser_args)
-                            return parser(*args)
-                        else:
-                            return parser(response)
-                    else:
-                        return
-                except AzureException as ex:
-                    retry_context.exception = ex
-                    raise ex
-                except Exception as ex:
-                    retry_context.exception = ex
-                    if sys.version_info >= (3,):
-                        # Automatic chaining in Python 3 means we keep the trace
-                        raise AzureException(ex.args[0])
-                    else:
-                        # There isn't a good solution in 2 for keeping the stack trace 
-                        # in general, or that will not result in an error in 3
-                        # However, we can keep the previous error type and message
-                        # TODO: In the future we will log the trace
-                        msg = ""
-                        if len(ex.args) > 0:
-                            msg = ex.args[0]
-                        raise AzureException('{}: {}'.format(ex.__class__.__name__, msg))
-
-            except AzureException as ex:
-                # Initialize defaults so these names exist even when CRITICAL logging
-                # is disabled; only parse the strings used for logging if logging is
-                # at least enabled for CRITICAL.
-                exception_str_in_one_line = ''
-                status_code = 'Unknown'
-                timestamp_and_request_id = ''
-                if logger.isEnabledFor(logging.CRITICAL):
-                    exception_str_in_one_line = str(ex).replace('\n', '')
-                    status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
-                    timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
-
-                logger.info("%s Operation failed: checking if the operation should be retried. "
-                            "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
-                            client_request_id_prefix,
-                            retry_context.count if hasattr(retry_context, 'count') else 0,
-                            timestamp_and_request_id,
-                            status_code,
-                            exception_str_in_one_line)
-
-                # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
-                # will not be resolved with retries.
-                if str(ex) == _ERROR_DECRYPTION_FAILURE:
-                    logger.error("%s Encountered decryption failure: this cannot be retried. "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-
-                # Determine whether a retry should be performed and if so, how 
-                # long to wait before performing retry.
-                retry_interval = self.retry(retry_context)
-                if retry_interval is not None:
-                    # Execute the callback
-                    if self.retry_callback:
-                        self.retry_callback(retry_context)
-
-                    logger.info(
-                        "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
-                        client_request_id_prefix,
-                        retry_context.count,
-                        retry_interval)
-
-                    # Sleep for the desired retry interval
-                    sleep(retry_interval)
-                else:
-                    logger.error("%s Retry policy did not allow for a retry: "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-            finally:
-                # If this is a location locked operation and the location is not set, 
-                # this is the first request of that operation. Set the location to 
-                # be used for subsequent requests in the operation.
-                if operation_context.location_lock and not operation_context.host_location:
-                    # note: to cover the emulator scenario, the host_location is grabbed
-                    # from request.host_locations(which includes the dev account name)
-                    # instead of request.host(which at this point no longer includes the dev account name)
-                    operation_context.host_location = {retry_context.location_mode: request.host_locations[retry_context.location_mode]}
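
The three callback hooks documented on StorageClient give per-request observability without subclassing; a sketch, where ``service`` is assumed to be any track1 client derived from StorageClient::

    # 'service' is assumed: any track1 client deriving from StorageClient.
    def log_request(request):
        print('-> {0} {1}'.format(request.method, request.path))

    def log_response(response):
        print('<- HTTP {0}'.format(response.status))

    def log_retry(retry_context):
        status = retry_context.response.status if retry_context.response else 'n/a'
        print('retry #{0} after status {1}'.format(retry_context.count, status))

    service.request_callback = log_request
    service.response_callback = log_response
    service.retry_callback = log_retry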
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/tokencredential.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/tokencredential.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/common/tokencredential.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/common/tokencredential.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import requests
-
-
-class TokenCredential(object):
-    """
-    Represents a token credential that is used to authorize HTTPS requests.
-    The token can be updated by the user.
-
-    :ivar str token:
-        The authorization token. It can be set by the user at any point in a thread-safe way.
-    """
-
-    def __init__(self, initial_value=None):
-        """
-        :param initial_value: initial value for the token.
-        """
-        self.token = initial_value
-
-    def signed_session(self, session=None):
-        """
-        Signs the request session with the token. This method is called every time a request goes on the wire.
-        The user is responsible for updating the token with the preferred tool/SDK.
-        In general there are two options:
-            - override this method to update the token in a preferred way and set the Authorization header on the session
-            - do not override this method, and have a timer that triggers periodically to update the token on this class
-
-        The second option is recommended as it tends to be more performance-friendly.
-
-        :param session: The session to configure for authentication
-        :type session: requests.Session
-        :rtype: requests.Session
-        """
-        session = session or requests.Session()
-        session.headers['Authorization'] = "Bearer {}".format(self.token)
-
-        return session
-
-    def update_token(self, new_value):
-        """
-        Sets a new value as the token; a method named token would be shadowed
-        by the token attribute assigned in __init__.
-
-        :param new_value: new value to be set as the token.
-        """
-        self.token = new_value
\ No newline at end of file
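
The timer-based refresh the docstring recommends looks roughly like this, where ``get_fresh_token`` stands in for a call to your identity library::

    import threading

    # get_fresh_token() is a hypothetical helper returning a bearer token string.
    credential = TokenCredential(get_fresh_token())

    def refresh():
        credential.token = get_fresh_token()  # attribute update is thread-safe
        threading.Timer(25 * 60, refresh).start()

    threading.Timer(25 * 60, refresh).start()  # refresh roughly every 25 minutes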
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .fileservice import FileService
-from .models import (
-    Share,
-    ShareProperties,
-    File,
-    FileProperties,
-    Directory,
-    DirectoryProperties,
-    FileRange,
-    ContentSettings,
-    CopyProperties,
-    SharePermissions,
-    FilePermissions,
-    DeleteSnapshot,
-)
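
Before this release, pinning code to this API version was a one-line import from the multiapi namespace; a sketch (``create_share`` shown as an assumed track1 call)::

    from azure.multiapi.storage.v2017_11_09.file import FileService

    service = FileService(account_name='myaccount', account_key='...')
    service.create_share('myshare')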
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '1.2.0rc1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-11-09'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,241 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .models import (
-    Share,
-    Directory,
-    File,
-    FileProperties,
-    FileRange,
-    ShareProperties,
-    DirectoryProperties,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _parse_metadata,
-)
-from ..common._error import _validate_content_match
-from ..common._common_conversion import (
-    _get_content_md5,
-    _to_str,
-)
-
-def _parse_snapshot_share(response, name):
-    '''
-    Extracts the snapshot value from the response header.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_share(response, name, snapshot)
-
-def _parse_share(response, name, snapshot=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ShareProperties)
-    return Share(name, props, metadata, snapshot)
-
-
-def _parse_directory(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, DirectoryProperties)
-    return Directory(name, props, metadata)
-
-
-def _parse_file(response, name, validate_content=False):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, FileProperties)
-
-    # For range gets, only look at 'x-ms-content-md5' for overall MD5
-    content_settings = getattr(props, 'content_settings')
-    if 'content-range' in response.headers:
-        if 'x-ms-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    return File(name, response.body, props, metadata)
-
-
-def _convert_xml_to_shares(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults AccountName="https://myaccount.file.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Shares>
-        <Share>
-          <Name>share-name</Name>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Quota>max-share-size</Quota>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Share>
-      </Shares>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    shares = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(shares, 'next_marker', next_marker)
-
-    shares_element = list_element.find('Shares')
-
-    for share_element in shares_element.findall('Share'):
-        # Name element
-        share = Share()
-        share.name = share_element.findtext('Name')
-
-        # Snapshot
-        share.snapshot = share_element.findtext('Snapshot')
-
-        # Metadata
-        metadata_root_element = share_element.find('Metadata')
-        if metadata_root_element is not None:
-            share.metadata = dict()
-            for metadata_element in metadata_root_element:
-                share.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = share_element.find('Properties')
-        share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        share.properties.etag = properties_element.findtext('Etag')
-        share.properties.quota = int(properties_element.findtext('Quota'))
-
-        # Add share to list
-        shares.append(share)
-
-    return shares
-
-
-def _convert_xml_to_directories_and_files(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Entries>
-        <File>
-          <Name>file-name</Name>
-          <Properties>
-            <Content-Length>size-in-bytes</Content-Length>
-          </Properties>
-        </File>
-        <Directory>
-          <Name>directory-name</Name>
-        </Directory>
-      </Entries>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    entries = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(entries, 'next_marker', next_marker)
-
-    entries_element = list_element.find('Entries')
-
-    for file_element in entries_element.findall('File'):
-        # Name element
-        file = File()
-        file.name = file_element.findtext('Name')
-
-        # Properties
-        properties_element = file_element.find('Properties')
-        file.properties.content_length = int(properties_element.findtext('Content-Length'))
-
-        # Add file to list
-        entries.append(file)
-
-    for directory_element in entries_element.findall('Directory'):
-        # Name element
-        directory = Directory()
-        directory.name = directory_element.findtext('Name')
-
-        # Add directory to list
-        entries.append(directory)
-
-    return entries
-
-
-def _convert_xml_to_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <Ranges>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-    </Ranges>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    ranges = list()
-    ranges_element = ETree.fromstring(response.body)
-
-    for range_element in ranges_element.findall('Range'):
-        # Parse range
-        range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
-
-        # Add range to list
-        ranges.append(range)
-
-    return ranges
-
-
-def _convert_xml_to_share_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <ShareStats>
-       <ShareUsage>15</ShareUsage>
-    </ShareStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    share_stats_element = ETree.fromstring(response.body)
-    return int(share_stats_element.findtext('ShareUsage'))
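
Every deserializer in this module follows the same pattern: parse the raw
response body with ElementTree, then read the documented element names via
findtext(). A self-contained sketch of that technique against the ShareStats
payload shown in the docstring above (sample XML assumed):

    from xml.etree import ElementTree as ETree

    body = (b'<?xml version="1.0" encoding="utf-8"?>'
            b'<ShareStats><ShareUsage>15</ShareUsage></ShareStats>')
    # findtext() returns the named child's text, or None if it is absent.
    assert int(ETree.fromstring(body).findtext('ShareUsage')) == 15
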
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING
-
-
-def _download_file_chunks(file_service, share_name, directory_name, file_name,
-                          download_size, block_size, progress, start_range, end_range, 
-                          stream, max_connections, progress_callback, validate_content, 
-                          timeout, operation_context, snapshot):
-    if max_connections <= 1:
-        raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('file'))
-
-    downloader = _FileChunkDownloader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        timeout,
-        operation_context,
-        snapshot,
-    )
-
-    import concurrent.futures
-    executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-    # Consume the map iterator so that worker exceptions propagate here.
-    list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-
-
-class _FileChunkDownloader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name, 
-                 download_size, chunk_size, progress, start_range, end_range, 
-                 stream, progress_callback, validate_content, timeout, operation_context, snapshot):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.chunk_size = chunk_size
-
-        self.download_size = download_size
-        self.start_index = start_range
-        self.file_end = end_range
-
-        self.stream = stream
-        self.stream_start = stream.tell()
-        self.stream_lock = threading.Lock()
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-        self.progress_lock = threading.Lock()
-        self.validate_content = validate_content
-        self.timeout = timeout
-        self.operation_context = operation_context
-        self.snapshot = snapshot
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.file_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.file_end:
-            chunk_end = self.file_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total = self.progress_total
-                self.progress_callback(total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        return self.file_service._get_file(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            timeout=self.timeout,
-            _context=self.operation_context,
-            snapshot=self.snapshot
-        )
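
The downloader above splits [start_range, end_range) into fixed-size chunks,
fans the range gets out over a thread pool, and has each worker seek to
stream_start + (chunk_start - start_index) before writing, so out-of-order
completions still land at the right offsets. A standalone sketch of just the
offset arithmetic (sizes illustrative):

    def chunk_ranges(start, end, chunk_size):
        # Mirrors get_chunk_offsets() plus the end clamping in process_chunk(),
        # yielding (chunk_start, inclusive_chunk_end) pairs.
        index = start
        while index < end:
            yield index, min(index + chunk_size, end) - 1
            index += chunk_size

    # A 10 MiB range in 4 MiB chunks resolves to three range gets.
    assert list(chunk_ranges(0, 10 * 2 ** 20, 4 * 2 ** 20)) == [
        (0, 4194303), (4194304, 8388607), (8388608, 10485759)]
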
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,66 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _str
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-
-
-def _get_path(share_name=None, directory_name=None, file_name=None):
-    '''
-    Creates the path to access a file resource.
-
-    share_name:
-        Name of share.
-    directory_name:
-        The path to the directory.
-    file_name:
-        Name of file.
-    '''
-    if share_name and directory_name and file_name:
-        return '/{0}/{1}/{2}'.format(
-            _str(share_name),
-            _str(directory_name),
-            _str(file_name))
-    elif share_name and directory_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(directory_name))
-    elif share_name and file_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(file_name))
-    elif share_name:
-        return '/{0}'.format(_str(share_name))
-    else:
-        return '/'
-
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
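
The x-ms-range header built above is standard HTTP byte-range syntax with an
inclusive end. A sketch of the two shapes the helper emits (the validation and
the 4MB MD5 guard are omitted here):

    def format_ms_range(start_range, end_range=None):
        # Closed range when the end is known, open-ended otherwise.
        if end_range is not None:
            return 'bytes={0}-{1}'.format(start_range, end_range)
        return 'bytes={0}-'.format(start_range)

    assert format_ms_range(0, 511) == 'bytes=0-511'
    assert format_ms_range(1024) == 'bytes=1024-'
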
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,133 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-
-def _upload_file_chunks(file_service, share_name, directory_name, file_name,
-                        file_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, timeout):
-    uploader = _FileChunkUploader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        file_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, file_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
-    else:
-        if file_size is not None:
-            range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
-        else:
-            range_ids = uploader.process_all_unknown_size()
-
-    return range_ids
-
-
-class _FileChunkUploader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name,
-                 file_size, chunk_size, stream, parallel, progress_callback,
-                 validate_content, timeout):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.file_size = file_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock() if parallel else None
-        self.validate_content = validate_content
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = 0
-        if self.file_size is None:
-            # we don't know the size of the stream, so we have no
-            # choice but to seek
-            while True:
-                data = self._read_from_stream(index, 1)
-                if not data:
-                    break
-                yield index
-                index += self.chunk_size
-        else:
-            while index < self.file_size:
-                yield index
-                index += self.chunk_size
-
-    def process_chunk(self, chunk_offset):
-        size = self.chunk_size
-        if self.file_size is not None:
-            size = min(size, self.file_size - chunk_offset)
-        chunk_data = self._read_from_stream(chunk_offset, size)
-        return self._upload_chunk_with_progress(chunk_offset, chunk_data)
-
-    def process_all_unknown_size(self):
-        assert self.stream_lock is None
-        range_ids = []
-        index = 0
-        while True:
-            data = self._read_from_stream(None, self.chunk_size)
-            if data:
-                # Upload first, then advance the offset: chunk_start must be
-                # the position of this chunk's first byte, not the byte after it.
-                range_id = self._upload_chunk_with_progress(index, data)
-                range_ids.append(range_id)
-                index += len(data)
-            else:
-                break
-
-        return range_ids
-
-    def _read_from_stream(self, offset, count):
-        if self.stream_lock is not None:
-            with self.stream_lock:
-                self.stream.seek(self.stream_start + offset)
-                data = self.stream.read(count)
-        else:
-            data = self.stream.read(count)
-        return data
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.file_size)
-
-    def _upload_chunk_with_progress(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        self.file_service.update_range(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            self.validate_content,
-            timeout=self.timeout
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end)
-        self._update_progress(len(chunk_data))
-        return range_id
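
Each chunk uploaded above becomes one update_range call covering the inclusive
byte range [chunk_start, chunk_start + len(data) - 1], and the returned range
ids are simply those ranges rendered as strings. A self-contained sketch of the
bookkeeping on the serial, unknown-size path:

    import io

    def sketch_range_ids(stream, chunk_size):
        # Read until EOF, recording the range each update_range call would cover.
        range_ids, index = [], 0
        while True:
            data = stream.read(chunk_size)
            if not data:
                return range_ids
            range_ids.append('bytes={0}-{1}'.format(index, index + len(data) - 1))
            index += len(data)

    assert sketch_range_ids(io.BytesIO(b'x' * 10), 4) == [
        'bytes=0-3', 'bytes=4-7', 'bytes=8-9']
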
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/fileservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/fileservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/fileservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/fileservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2468 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSharedKeyAuthentication,
-    _StorageSASAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _get_content_md5,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _parse_metadata,
-    _parse_properties,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_STORAGE_MISSING_INFO,
-    _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-    _validate_access_policies,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    FileSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_shares,
-    _convert_xml_to_directories_and_files,
-    _convert_xml_to_ranges,
-    _convert_xml_to_share_stats,
-    _parse_file,
-    _parse_share,
-    _parse_snapshot_share,
-    _parse_directory,
-)
-from ._download_chunking import _download_file_chunks
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import _upload_file_chunks
-from .models import (
-    FileProperties,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class FileService(StorageClient):
-    '''
-    The Server Message Block (SMB) protocol is the preferred file share protocol
-    used on premises today. The Microsoft Azure File service enables customers to
-    leverage the availability and scalability of Azure's Cloud Infrastructure as
-    a Service (IaaS) SMB without having to rewrite SMB client applications.
-
-    The Azure File service also offers a compelling alternative to traditional
-    Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which
-    are often complex and expensive to install, configure, and operate.
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_file_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        file is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_file_to_* methods if
-        max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the file is smaller than
-        this. If this is set to larger than 4MB, validate_content will throw an
-        error if enabled. However, if validate_content is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar int MAX_RANGE_SIZE:
-        The size of the ranges put by create_file_from_* methods. Smaller ranges
-        may be put if there is less data provided. The maximum range size the service
-        supports is 4MB.
-    '''
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024
-    MAX_RANGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'file',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(FileService, self).__init__(service_params)
-
-        if self.account_name == DEV_ACCOUNT_NAME:
-            raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_file_url(self, share_name, directory_name, file_name,
-                      protocol=None, sas_token=None):
-        '''
-        Creates the url to access a file.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when FileService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: file access URL.
-        :rtype: str
-        '''
-
-        if directory_name is None:
-            url = '{}://{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                file_name,
-            )
-        else:
-            url = '{}://{}/{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                directory_name,
-                file_name,
-            )
-
-        if sas_token:
-            url += '?' + sas_token
-
-        return url
-
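-    # Illustrative sketch (not part of the original module), assuming an
-    # account named 'myaccount':
-    #   make_file_url('myshare', 'mydir', 'hello.txt')
-    #   -> 'https://myaccount.file.core.windows.net/myshare/mydir/hello.txt'
-    # With directory_name=None the directory segment is omitted entirely.
-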
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the file service.
-        Use the returned signature with the sas_token parameter of the FileService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.FILE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_share_shared_access_signature(self, share_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None,
-                                               protocol=None,
-                                               cache_control=None,
-                                               content_disposition=None,
-                                               content_encoding=None,
-                                               content_language=None,
-                                               content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_share(
-            share_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def generate_file_shared_access_signature(self, share_name,
-                                              directory_name=None,
-                                              file_name=None,
-                                              permission=None,
-                                              expiry=None,
-                                              start=None,
-                                              id=None,
-                                              ip=None,
-                                              protocol=None,
-                                              cache_control=None,
-                                              content_disposition=None,
-                                              content_encoding=None,
-                                              content_language=None,
-                                              content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_file(
-            share_name,
-            directory_name,
-            file_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
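-    # Illustrative sketch (not part of the original module; values are
-    # placeholders): pair the token with make_file_url to get a
-    # self-authenticating link. Assumes FilePermissions is imported from
-    # .models and datetime/timedelta from the datetime module.
-    #   token = svc.generate_file_shared_access_signature(
-    #       'myshare', 'mydir', 'hello.txt',
-    #       permission=FilePermissions.READ,
-    #       expiry=datetime.utcnow() + timedelta(hours=1))
-    #   url = svc.make_file_url('myshare', 'mydir', 'hello.txt', sas_token=token)
-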
-    def set_file_service_properties(self, hour_metrics=None, minute_metrics=None,
-                                    cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's File service, including
-        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for files.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for files.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
-
-    def get_file_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's File service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The file service properties.
-        :rtype:
-            :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def list_shares(self, prefix=None, marker=None, num_results=None,
-                    include_metadata=False, timeout=None, include_snapshots=False):
-        '''
-        Returns a generator to list the shares under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned or num_results 
-        is reached.
-
-        If num_results is specified and the account has more than that number of 
-        shares, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of shares to return.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param bool include_snapshots:
-            Specifies that share snapshots be returned in the response.
-        '''
-        include = 'snapshots' if include_snapshots else None
-        if include_metadata:
-            if include is not None:
-                include = include + ',metadata'
-            else:
-                include = 'metadata'
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_shares(**kwargs)
-
-        return ListGenerator(resp, self._list_shares, (), kwargs)
-
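-    # Illustrative sketch (not part of the original module): ListGenerator
-    # follows the service's continuation tokens lazily, so a plain for-loop
-    # enumerates every matching share.
-    #   for share in svc.list_shares(prefix='logs-', include_metadata=True):
-    #       print(share.name, share.metadata)
-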
-    def _list_shares(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the shares under the specified account.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of shares to return. A single list
-            request may return up to 1000 shares and potentially a continuation
-            token which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the share's metadata,
-            snapshots, or both be returned as part of the response body. Set
-            this parameter to 'metadata' for share metadata only, to 'snapshots'
-            for share snapshots only, or to 'snapshots,metadata' for both.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_shares, operation_context=_context)
-
-    def create_share(self, share_name, metadata=None, quota=None,
-                     fail_on_exist=False, timeout=None):
-        '''
-        Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of share to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category':'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5TB (5120).
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the share exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if share is created, False if share already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
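-    # Illustrative sketch of the fail_on_exist contract (names are placeholders):
-    #   svc.create_share('myshare')                      # True: share created
-    #   svc.create_share('myshare')                      # False: already exists
-    #   svc.create_share('myshare', fail_on_exist=True)  # raises AzureHttpError (409)
-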
-    def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None):
-        '''
-        Creates a snapshot of an existing share under the specified account.
-
-        :param str share_name:
-            The name of the share to create a snapshot of.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category':'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be
-            greater than 0, and less than or equal to 5TB (5120).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: :class:`~azure.storage.file.models.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_share, [share_name])
-
-    def get_share_properties(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A Share that exposes properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_share, [share_name])
-
-    def set_share_properties(self, share_name, quota, timeout=None):
-        '''
-        Sets service-defined properties for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('quota', quota)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-
-        self._perform_request(request)
-
-    def get_share_metadata(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the share metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_share_metadata(self, share_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        share. Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param metadata:
-            A dict containing name-value pairs to associate with the share as 
-            metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def get_share_acl(self, share_name, timeout=None):
-        '''
-        Gets the permissions for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the share.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_share_acl(self, share_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets the permissions for the specified share or stored access 
-        policies that may be used with Shared Access Signatures.
-
-        :param str share_name:
-            Name of existing share.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
-
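-    # Illustrative sketch (editorial note, not part of the original module):
-    # `AccessPolicy` is the type named in the docstrings above; the
-    # `SharePermissions` helper is assumed to come from this package's models.
-    #
-    #   from datetime import datetime, timedelta
-    #   policy = AccessPolicy(permission=SharePermissions.READ,
-    #                         expiry=datetime.utcnow() + timedelta(hours=1))
-    #   service.set_share_acl('myshare', {'read-only': policy})
-    #   acl = service.get_share_acl('myshare')  # {'read-only': AccessPolicy}
-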
-    def get_share_stats(self, share_name, timeout=None):
-        '''
-        Gets the approximate size of the data stored on the share,
-        rounded up to the nearest gigabyte.
-        
-        Note that this value may not include all recently created
-        or recently resized files.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the approximate size of the data stored on the share.
-        :rtype: int
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_share_stats)
-
-    def delete_share(self, share_name, fail_not_exist=False, timeout=None, snapshot=None, delete_snapshots=None):
-        '''
-        Marks the specified share for deletion. If the share
-        does not exist, the operation fails on the service. By 
-        default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of share to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the share doesn't
-            exist. False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-            Specify this argument to delete a specific snapshot only.
-            delete_snapshots must be None if this is specified.
-        :param ~azure.storage.file.models.DeleteSnapshot delete_snapshots:
-            To delete a share that has snapshots, this must be specified as DeleteSnapshot.Include.
-        :return: True if the share is deleted, False if the share doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.headers = {
-            'x-ms-delete-snapshots': _to_str(delete_snapshots)
-        }
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
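-    # Illustrative sketch (editorial note, not part of the original module):
-    # deleting a share that still has snapshots requires opting in through
-    # DeleteSnapshot.Include (referenced in the docstring above; its import
-    # location within this vendored package is assumed here):
-    #
-    #   from azure.storage.file.models import DeleteSnapshot  # assumed path
-    #   deleted = service.delete_share('myshare',
-    #                                  delete_snapshots=DeleteSnapshot.Include)
-    #   # with fail_not_exist=False (the default) this returns False rather
-    #   # than raising when the share is missing
-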
-    def create_directory(self, share_name, directory_name, metadata=None,
-                         fail_on_exist=False, timeout=None):
-        '''
-        Creates a new directory under the specified share or parent directory. 
-        If the directory with the same name already exists, the operation fails
-        on the service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to create, including the path to the parent 
-            directory.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            directory as metadata. Example: {'Category':'test'}
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the directory exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is created, False if directory already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def delete_directory(self, share_name, directory_name,
-                         fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified empty directory. Note that the directory must
-        be empty before it can be deleted. Attempting to delete directories 
-        that are not empty will fail.
-
-        If the directory does not exist, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to delete, including the path to the parent 
-            directory.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the directory doesn't
-            exist. False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is deleted, False otherwise.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_directory_properties(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-           The path to an existing directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: properties for the specified directory within a directory object.
-        :rtype: :class:`~azure.storage.file.models.Directory`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_directory, [directory_name])
-
-    def get_directory_metadata(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified directory.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the directory metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        directory. Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param metadata:
-            A dict containing name-value pairs to associate with the directory
-            as metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def list_directories_and_files(self, share_name, directory_name=None,
-                                   num_results=None, marker=None, timeout=None,
-                                   prefix=None, snapshot=None):
-        '''
-        Returns a generator to list the directories and files under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all directories and files have been returned or
-        num_results is reached.
-
-        If num_results is specified and the share has more than that number of 
-        files and directories, the generator will have a populated next_marker 
-        field once it finishes. This marker can be used to create a new generator 
-        if more results are desired.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int num_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (share_name, directory_name)
-        kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout,
-                  '_context': operation_context, 'prefix': prefix, 'snapshot': snapshot}
-
-        resp = self._list_directories_and_files(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
-
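-    # Illustrative sketch (editorial note, not part of the original module):
-    # the generator above lazily follows continuation tokens, so a plain loop
-    # enumerates everything; num_results plus next_marker supports manual
-    # paging:
-    #
-    #   for item in service.list_directories_and_files('myshare'):
-    #       print(item.name)            # Directory or File model
-    #
-    #   page = service.list_directories_and_files('myshare', num_results=100)
-    #   items = list(page)              # at most 100 entries
-    #   rest = service.list_directories_and_files('myshare', num_results=100,
-    #                                              marker=page.next_marker)
-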
-    def _list_directories_and_files(self, share_name, directory_name=None,
-                                    marker=None, max_results=None, timeout=None,
-                                    prefix=None, _context=None, snapshot=None):
-        '''
-        Returns a list of the directories and files under the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _convert_xml_to_directories_and_files,
-                                     operation_context=_context)
-
-    def get_file_properties(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. Returns an instance of :class:`~azure.storage.file.models.File` with
-        :class:`~azure.storage.file.models.FileProperties` and a metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: a file object including properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-
-        return self._perform_request(request, _parse_file, [file_name])
-
-    def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
-        '''
-        Returns a boolean indicating whether the share exists if only share name is
-        given. If directory_name is specified, a boolean will be returned indicating
-        if the directory exists. If file_name is specified as well, a boolean will be
-        returned indicating if the file exists.
-
-        :param str share_name:
-            Name of a share.
-        :param str directory_name:
-            The path to a directory.
-        :param str file_name:
-            Name of a file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        try:
-            if file_name is not None:
-                self.get_file_properties(share_name, directory_name, file_name, timeout=timeout, snapshot=snapshot)
-            elif directory_name is not None:
-                self.get_directory_properties(share_name, directory_name, timeout=timeout, snapshot=snapshot)
-            else:
-                self.get_share_properties(share_name, timeout=timeout, snapshot=snapshot)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
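-    # Illustrative sketch (editorial note, not part of the original module):
-    # the same call above probes a share, a directory, or a file depending on
-    # which arguments are supplied:
-    #
-    #   service.exists('myshare')                         # share
-    #   service.exists('myshare', 'mydir')                # directory
-    #   service.exists('myshare', 'mydir', 'file.txt')    # file
-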
-    def resize_file(self, share_name, directory_name,
-                    file_name, content_length, timeout=None):
-        '''
-        Resizes a file to the specified size. If the specified byte
-        value is less than the current size of the file, then all
-        ranges above the specified byte value are cleared.
-        
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int content_length:
-            The length to resize the file to.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length)
-        }
-
-        self._perform_request(request)
-
-    def set_file_properties(self, share_name, directory_name, file_name,
-                            content_settings, timeout=None):
-        '''
-        Sets system properties on the file. If one property is set for the
-        content_settings, all properties will be overridden.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set the file properties.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_settings', content_settings)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = content_settings._to_headers()
-
-        self._perform_request(request)
-
-    def get_file_metadata(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the file metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_file_metadata(self, share_name, directory_name,
-                          file_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the file. To remove all
-            metadata from the file, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def copy_file(self, share_name, directory_name, file_name, copy_source,
-                  metadata=None, timeout=None):
-        '''
-        Copies a file asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The File service copies files on a best-effort basis.
-
-        If the destination file exists, it will be overwritten. The destination 
-        file cannot be modified while the copy operation is in progress.
-
-        :param str share_name:
-            Name of the destination share. The share must exist.
-        :param str directory_name:
-            Name of the destination directory. The directory must exist.
-        :param str file_name:
-            Name of the destination file. If the destination file exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.file.core.windows.net/myshare/mydir/myfile
-            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
-        :param metadata:
-            Name-value pairs associated with the file as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination file. If one or more name-value 
-            pairs are specified, the destination file is created with the specified 
-            metadata, and the metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.file.models.CopyProperties`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_source', copy_source)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [FileProperties]).copy
-
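-    # Illustrative sketch (editorial note, not part of the original module):
-    # copy_file returns CopyProperties immediately while the copy runs server
-    # side; the poll loop below is an assumed pattern, not prescribed here:
-    #
-    #   import time
-    #   src = 'https://myaccount.file.core.windows.net/myshare/mydir/src.bin'
-    #   copy = service.copy_file('myshare', 'mydir', 'dst.bin', src)
-    #   while copy.status == 'pending':
-    #       time.sleep(5)
-    #       copy = service.get_file_properties(
-    #           'myshare', 'mydir', 'dst.bin').properties.copy
-    #   # a pending copy can be cancelled with
-    #   # service.abort_copy_file('myshare', 'mydir', 'dst.bin', copy.id)
-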
-    def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
-        '''
-        Aborts a pending copy_file operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param str share_name:
-            Name of destination share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of destination file.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_file operation.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
-    def delete_file(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-
-        self._perform_request(request)
-
-    def create_file(self, share_name, directory_name, file_name,
-                    content_length, content_settings=None, metadata=None,
-                    timeout=None):
-        '''
-        Creates a new file.
-
-        See create_file_from_* for high level functions that handle the
-        creation and upload of large files with automatic chunking and
-        progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param int content_length:
-            Length of the file in bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length),
-            'x-ms-type': 'file'
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        self._perform_request(request)
-
-    def create_file_from_path(self, share_name, directory_name, file_name,
-                              local_file_path, content_settings=None,
-                              metadata=None, validate_content=False, progress_callback=None,
-                              max_connections=2, timeout=None):
-        '''
-        Creates a new Azure file from a local file path, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str local_file_path:
-            Path of the local file to upload as the file content.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used for setting file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using HTTP instead of HTTPS, as HTTPS (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('local_file_path', local_file_path)
-
-        count = path.getsize(local_file_path)
-        with open(local_file_path, 'rb') as stream:
-            self.create_file_from_stream(
-                share_name, directory_name, file_name, stream,
-                count, content_settings, metadata, validate_content, progress_callback,
-                max_connections, timeout)
-
-    def create_file_from_text(self, share_name, directory_name, file_name,
-                              text, encoding='utf-8', content_settings=None,
-                              metadata=None, validate_content=False, timeout=None):
-        '''
-        Creates a new file from str/unicode, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str text:
-            Text to upload to the file.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using HTTP instead of HTTPS, as HTTPS (the default) will
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.create_file_from_bytes(
-            share_name, directory_name, file_name, text, count=len(text),
-            content_settings=content_settings, metadata=metadata,
-            validate_content=validate_content, timeout=timeout)
-
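-    # Illustrative sketch (editorial note, not part of the original module):
-    # text is encoded client side and handed to create_file_from_bytes:
-    #
-    #   service.create_file_from_text('myshare', 'mydir', 'hello.txt',
-    #                                 u'Hello, world!', encoding='utf-8')
-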
-    def create_file_from_bytes(
-            self, share_name, directory_name, file_name, file,
-            index=0, count=None, content_settings=None, metadata=None,
-            validate_content=False, progress_callback=None, max_connections=2,
-            timeout=None):
-        '''
-        Creates a new file from an array of bytes, or updates the content
-        of an existing file, with automatic chunking and progress
-        notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param bytes file:
-            Content of file as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or a negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using HTTP instead of HTTPS, as HTTPS (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file', file)
-        _validate_type_bytes('file', file)
-
-        if index < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(file) - index
-
-        stream = BytesIO(file)
-        stream.seek(index)
-
-        self.create_file_from_stream(
-            share_name, directory_name, file_name, stream, count,
-            content_settings, metadata, validate_content, progress_callback,
-            max_connections, timeout)
-
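-    # Illustrative sketch (editorial note, not part of the original module):
-    # index and count select a slice of the source buffer; leaving count as
-    # None uploads everything from index onward:
-    #
-    #   data = b'0123456789'
-    #   service.create_file_from_bytes('myshare', 'mydir', 'slice.bin',
-    #                                  data, index=2, count=5)   # b'23456'
-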
-    def create_file_from_stream(
-            self, share_name, directory_name, file_name, stream, count,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, timeout=None):
-        '''
-        Creates a new file from a file/stream, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the file content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a
-            file cannot be created if the count is unknown.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using HTTP instead of HTTPS, as HTTPS (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-
-        if count < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        self.create_file(
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            content_settings,
-            metadata,
-            timeout
-        )
-
-        _upload_file_chunks(
-            self,
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            self.MAX_RANGE_SIZE,
-            stream,
-            max_connections,
-            progress_callback,
-            validate_content,
-            timeout
-        )
-
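-    # Illustrative sketch (editorial note, not part of the original module):
-    # count is mandatory because the service needs the final file size up
-    # front; max_connections > 1 additionally requires a seekable stream:
-    #
-    #   import io, os
-    #   size = 10 * 1024 * 1024
-    #   buf = io.BytesIO(os.urandom(size))
-    #   service.create_file_from_stream('myshare', 'mydir', 'big.bin',
-    #                                   buf, count=size, max_connections=4)
-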
-    def _get_file(self, share_name, directory_name, file_name,
-                 start_range=None, end_range=None, validate_content=False,
-                 timeout=None, _context=None, snapshot=None):
-        '''
-        Downloads a file's content, metadata, and properties. You can specify a
-        range if you don't need to download the file in its entirety. If no range
-        is specified, the full file will be downloaded.
-
-        See get_file_to_* for high level functions that handle the download
-        of large files with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_file,
-                                     [file_name, validate_content],
-                                     operation_context=_context)
-
-    def get_file_to_path(self, share_name, directory_name, file_name, file_path,
-                         open_mode='wb', start_range=None, end_range=None,
-                         validate_content=False, progress_callback=None,
-                         max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a file path, with automatic chunking and progress
-        notifications. Returns an instance of File with properties and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str file_path:
-            Path of file to write to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an append-only
-            open_mode prevents parallel download, so max_connections must be set
-            to 1 if such a mode is used.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire
-            if using HTTP instead of HTTPS, as HTTPS (the default) will already
-            validate. Note that the service will only return transactional MD5s
-            for chunks of 4 MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4 MB, an error will be
-            thrown. As computing the MD5 takes processing time and more requests
-            must be made due to the reduced chunk size, there may be some
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            file = self.get_file_to_stream(
-                share_name, directory_name, file_name, stream,
-                start_range, end_range, validate_content,
-                progress_callback, max_connections, timeout, snapshot)
-
-        return file
-
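-    # Illustrative sketch (editorial note, not part of the original module):
-    # a ranged, parallel download to a local path; progress_callback receives
-    # (current, total) as documented above:
-    #
-    #   def on_progress(current, total):
-    #       print('{0}/{1} bytes'.format(current, total))
-    #
-    #   f = service.get_file_to_path('myshare', 'mydir', 'big.bin',
-    #                                '/tmp/big.bin', start_range=0,
-    #                                end_range=1048575,  # first 1 MiB, inclusive
-    #                                progress_callback=on_progress,
-    #                                max_connections=4)
-    #   print(f.properties.content_length)
-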
-    def get_file_to_stream(
-        self, share_name, directory_name, file_name, stream,
-        start_range=None, end_range=None, validate_content=False,
-        progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties
-        and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param io.IOBase stream:
-            Opened file/stream to write to.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire
-            if using HTTP instead of HTTPS, as HTTPS (the default) will already
-            validate. Note that the service will only return transactional MD5s
-            for chunks of 4 MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4 MB, an error will be
-            thrown. As computing the MD5 takes processing time and more requests
-            must be made due to the reduced chunk size, there may be some
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total)
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-
-        # If the user explicitly sets max_connections to 1, do a single shot download
-        if max_connections == 1:
-            file = self._get_file(share_name,
-                                  directory_name,
-                                  file_name,
-                                  start_range=start_range,
-                                  end_range=end_range,
-                                  validate_content=validate_content,
-                                  timeout=timeout,
-                                  snapshot=snapshot)
-
-            # Set the download size
-            download_size = file.properties.content_length
-
-        # If max_connections is greater than 1, do the first get to establish the 
-        # size of the file and get the first segment of data
-        else:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-            # The service only provides transactional MD5s for chunks under 4MB.           
-            # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first 
-            # chunk so a transactional MD5 can be retrieved.
-            first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-            initial_request_start = start_range if start_range is not None else 0
-
-            if end_range is not None and end_range - start_range < first_get_size:
-                initial_request_end = end_range
-            else:
-                initial_request_end = initial_request_start + first_get_size - 1
-
-            # Send a context object to make sure we always retry to the initial location
-            operation_context = _OperationContext(location_lock=True)
-            try:
-                file = self._get_file(share_name,
-                                      directory_name,
-                                      file_name,
-                                      start_range=initial_request_start,
-                                      end_range=initial_request_end,
-                                      validate_content=validate_content,
-                                      timeout=timeout,
-                                      _context=operation_context,
-                                      snapshot=snapshot)
-
-                # Parse the total file size and adjust the download size if ranges 
-                # were specified
-                file_size = _parse_length_from_content_range(file.properties.content_range)
-                if end_range is not None:
-                    # Use the end_range unless it is over the end of the file
-                    download_size = min(file_size, end_range - start_range + 1)
-                elif start_range is not None:
-                    download_size = file_size - start_range
-                else:
-                    download_size = file_size
-            except AzureHttpError as ex:
-                if start_range is None and ex.status_code == 416:
-                    # Get range will fail on an empty file. If the user did not 
-                    # request a range, do a regular get request in order to get 
-                    # any properties.
-                    file = self._get_file(share_name,
-                                          directory_name,
-                                          file_name,
-                                          validate_content=validate_content,
-                                          timeout=timeout,
-                                          _context=operation_context,
-                                          snapshot=snapshot)
-
-                    # Set the download size to empty
-                    download_size = 0
-                else:
-                    raise ex
-
-        # Mark the first progress chunk. If the file is small or this is a single 
-        # shot download, this is the only call
-        if progress_callback:
-            progress_callback(file.properties.content_length, download_size)
-
-        # Write the content to the user stream  
-        # Clear file content since output has been written to user stream   
-        if file.content is not None:
-            stream.write(file.content)
-            file.content = None
-
-        # If the file is small or single shot download was used, the download is 
-        # complete at this point. If file size is large, use parallel download.
-        if file.properties.content_length != download_size:
-            # At this point we would like to lock on something like the etag so
-            # that if the file is modified, we don't get a corrupted download.
-            # However, this feature is not yet available on the file service.
-
-            end_file = file_size
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the file
-                end_file = min(file_size, end_range + 1)
-
-            _download_file_chunks(
-                self,
-                share_name,
-                directory_name,
-                file_name,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_file,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                timeout,
-                operation_context,
-                snapshot
-            )
-
-            # Set the content length to the download size instead of the size of 
-            # the last range
-            file.properties.content_length = download_size
-
-            # Overwrite the content range to the user requested range
-            file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead 
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            file.properties.content_md5 = None
-
-        return file
-
-    def get_file_to_bytes(self, share_name, directory_name, file_name,
-                          start_range=None, end_range=None, validate_content=False,
-                          progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.file.models.File` with
-        properties, metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the file. This is primarily valuable for detecting bitflips on the
-            wire when using http instead of https, as https (the default) already
-            validates. Note that the service will only return transactional MD5s
-            for chunks 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will
-            be thrown. As computing the MD5 takes processing time, and more
-            requests must be made due to the reduced chunk size, there may be
-            some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not
-            generally recommended, but is available if very few threads should be
-            used, network requests are very expensive, or a non-seekable stream
-            prevents parallel download. This may also be valuable to enforce
-            atomicity if the file is being concurrently modified, or if many
-            files are expected to be empty, as an extra request is required for
-            empty files when max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-
-        stream = BytesIO()
-        file = self.get_file_to_stream(
-            share_name,
-            directory_name,
-            file_name,
-            stream,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = stream.getvalue()
-        return file
-
-    def get_file_to_text(
-        self, share_name, directory_name, file_name, encoding='utf-8',
-        start_range=None, end_range=None, validate_content=False,
-        progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties,
-        metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str encoding:
-            Python encoding to use when decoding the file data.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the file. This is primarily valuable for detecting bitflips on the
-            wire when using http instead of https, as https (the default) already
-            validates. Note that the service will only return transactional MD5s
-            for chunks 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will
-            be thrown. As computing the MD5 takes processing time, and more
-            requests must be made due to the reduced chunk size, there may be
-            some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not
-            generally recommended, but is available if very few threads should be
-            used, network requests are very expensive, or a non-seekable stream
-            prevents parallel download. This may also be valuable to enforce
-            atomicity if the file is being concurrently modified, or if many
-            files are expected to be empty, as an extra request is required for
-            empty files when max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('encoding', encoding)
-
-        file = self.get_file_to_bytes(
-            share_name,
-            directory_name,
-            file_name,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = file.content.decode(encoding)
-        return file
-
-    def update_range(self, share_name, directory_name, file_name, data,
-                     start_range, end_range, validate_content=False, timeout=None):
-        '''
-        Writes the bytes specified by the request body into the specified range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bytes data:
-            Content of the range.
-        :param int start_range:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update first 512 bytes of file.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the range content. The storage
-            service checks the hash of the content that has arrived with the
-            hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire when using http instead of https, as https
-            (the default) already validates. Note that this MD5 hash is not
-            stored with the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('data', data)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-write': 'update',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-        request.body = _get_data_bytes_only('data', data)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
-    def clear_range(self, share_name, directory_name, file_name, start_range,
-                    end_range, timeout=None):
-        '''
-        Clears the specified range and releases the space used in storage for 
-        that range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear first 512 bytes of file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'Content-Length': '0',
-            'x-ms-write': 'clear',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-
-        self._perform_request(request)
-
-    def list_ranges(self, share_name, directory_name, file_name,
-                    start_range=None, end_range=None, timeout=None, snapshot=None):
-        '''
-        Retrieves the valid ranges for a file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Specifies the start offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of file.
-        :param int end_range:
-            Specifies the end offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :returns: a list of valid ranges
-        :rtype: a list of :class:`~azure.storage.file.models.FileRange`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'rangelist',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False)
-
-        return self._perform_request(request, _convert_xml_to_ranges)
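The get_file_to_* family deleted above implements the single-shot versus
parallel split that its docstrings describe. A minimal usage sketch of that
track1 surface, assuming the vendored fileservice module mirrors
azure.storage.file.fileservice and using placeholder credentials:

    from io import BytesIO
    from azure.multiapi.storage.v2017_11_09.file.fileservice import FileService

    service = FileService(account_name='myaccount', account_key='<account-key>')

    stream = BytesIO()
    # max_connections=1 takes the single-shot branch; >=2 issues an initial
    # MAX_SINGLE_GET_SIZE get, then parallel MAX_CHUNK_GET_SIZE range gets.
    downloaded = service.get_file_to_stream(
        'myshare', None, 'big.bin', stream,
        max_connections=4,
        progress_callback=lambda current, total: print(current, total))
    assert downloaded.properties.content_length == len(stream.getvalue())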
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/models.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,407 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Share(object):
-    '''
-    File share class.
-    
-    :ivar str name:
-        The name of the share.
-    :ivar ShareProperties properties:
-        System properties for the share.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the share as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list shares operation. If this parameter was specified but the 
-        share has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None, snapshot=None):
-        self.name = name
-        self.properties = props or ShareProperties()
-        self.metadata = metadata
-        self.snapshot = snapshot
-
-
-class ShareProperties(object):
-    '''
-    File share's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        Returns the current share quota in GB.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.quota = None
-
-
-class Directory(object):
-    '''
-    Directory class.
-    
-    :ivar str name:
-        The name of the directory.
-    :ivar DirectoryProperties properties:
-        System properties for the directory.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the directory as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list directory operation. If this parameter was specified but the 
-        directory has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or DirectoryProperties()
-        self.metadata = metadata
-
-
-class DirectoryProperties(object):
-    '''
-    File directory's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool server_encrypted:
-        Set to true if the directory metadata is encrypted on the server.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.server_encrypted = None
-
-
-class File(object):
-    '''
-    File class.
-    
-    :ivar str name:
-        The name of the file.
-    :ivar content:
-        File content.
-    :vartype content: str or bytes
-    :ivar FileProperties properties:
-        System properties for the file.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the file as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list file operation. If this parameter was specified but the 
-        file has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.content = content
-        self.properties = props or FileProperties()
-        self.metadata = metadata
-
-
-class FileProperties(object):
-    '''
-    File Properties.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire file was requested,
-        the length of the file in bytes. If a subset of the file was requested,
-        the length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client
-        requested a subset of the file.
-    :ivar ~azure.storage.file.models.ContentSettings content_settings:
-        Stores all the content settings for the file.
-    :ivar ~azure.storage.file.models.CopyProperties copy:
-        Stores all the copy properties for the file.
-    :ivar bool server_encrypted:
-        Set to true if the file data and application metadata are completely encrypted.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.content_settings = ContentSettings()
-        self.copy = CopyProperties()
-        self.server_encrypted = None
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a file.
-
-    :ivar str content_type:
-        The content type specified for the file. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If content_encoding has previously been set
-        for the file, that value is stored.
-    :ivar str content_language:
-        If content_language has previously been set
-        for the file, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :ivar str cache_control:
-        If cache_control has previously been set for
-        the file, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-cache-control': _to_str(self.cache_control),
-            'x-ms-content-type': _to_str(self.content_type),
-            'x-ms-content-disposition': _to_str(self.content_disposition),
-            'x-ms-content-md5': _to_str(self.content_md5),
-            'x-ms-content-encoding': _to_str(self.content_encoding),
-            'x-ms-content-language': _to_str(self.content_language),
-        }
-
-
-class CopyProperties(object):
-    '''
-    File Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy File operation where this file
-        was the destination file. This header does not appear if this file has never
-        been the destination in a Copy File operation, or if this file has been
-        modified after a concluded Copy File operation using Set File Properties or
-        Put File.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source file used in the last attempted
-        Copy File operation where this file was the destination file. This header does not
-        appear if this file has never been the destination in a Copy File operation, or if
-        this file has been modified after a concluded Copy File operation using
-        Set File Properties or Put File.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending: 
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy File.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy File operation where this file was the destination file. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy File operation where this file was the
-        destination file. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure. 
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class FileRange(object):
-    '''
-    File Range.
-    
-    :ivar int start:
-        Byte index for start of file range.
-    :ivar int end:
-        Byte index for end of file range.
-    '''
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the Share has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the share and all of its snapshots.
-    '''
-
-
-class FilePermissions(object):
-    '''
-    FilePermissions class to be used with 
-    :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API.
-
-    :ivar FilePermissions FilePermissions.CREATE:
-        Create a new file or copy a file to a new file.
-    :ivar FilePermissions FilePermissions.DELETE: 
-        Delete the file.
-    :ivar FilePermissions FilePermissions.READ:
-        Read the content, properties, metadata. Use the file as the source of a copy 
-        operation.
-    :ivar FilePermissions FilePermissions.WRITE: 
-        Create or write content, properties, metadata. Resize the file. Use the file 
-        as the destination of a copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, create=False, write=False, delete=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata. Use the file as the source of a copy 
-            operation.
-        :param bool create:
-            Create a new file or copy a file to a new file.
-        :param bool write: 
-            Create or write content, properties, metadata. Resize the file. Use the file 
-            as the destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the file.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-FilePermissions.CREATE = FilePermissions(create=True)
-FilePermissions.DELETE = FilePermissions(delete=True)
-FilePermissions.READ = FilePermissions(read=True)
-FilePermissions.WRITE = FilePermissions(write=True)
-
-
-class SharePermissions(object):
-    '''
-    SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
-    method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. 
-
-    :ivar SharePermissions SharePermissions.DELETE:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use 
-        an account SAS instead.
-    :ivar SharePermissions SharePermissions.LIST:
-        List files and directories in the share.
-    :ivar SharePermissions SharePermissions.READ:
-        Read the content, properties or metadata of any file in the share. Use any 
-        file in the share as the source of a copy operation.
-    :ivar SharePermissions SharePermissions.WRITE:
-        For any file in the share, create or write content, properties or metadata. 
-        Resize the file. Use the file as the destination of a copy operation within 
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or 
-        metadata with a service SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties or metadata of any file in the share. Use any 
-            file in the share as the source of a copy operation.
-        :param bool write: 
-            For any file in the share, create or write content, properties or metadata. 
-            Resize the file. Use the file as the destination of a copy operation within 
-            the same account.
-            Note: You cannot grant permissions to read or write share properties or 
-            metadata with a service SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any file in the share.
-            Note: You cannot grant permissions to delete a share with a service SAS. Use 
-            an account SAS instead.
-        :param bool list: 
-            List files and directories in the share.
-        :param str _str: 
-            A string representing the permissions
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-SharePermissions.DELETE = SharePermissions(delete=True)
-SharePermissions.LIST = SharePermissions(list=True)
-SharePermissions.READ = SharePermissions(read=True)
-SharePermissions.WRITE = SharePermissions(write=True)
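The __or__/__add__ overloads above compose permissions by concatenating their
string forms and re-parsing them; a short sketch against the deleted module
path:

    from azure.multiapi.storage.v2017_11_09.file.models import (
        FilePermissions,
        SharePermissions,
    )

    # '+' and '|' both concatenate the flag strings ('r', 'c', 'w', 'd', 'l').
    perms = FilePermissions.READ + FilePermissions.WRITE
    print(str(perms))               # 'rw'
    print(perms.read, perms.write)  # True True

    share_perms = SharePermissions(read=True, list=True)
    print(str(share_perms))         # 'rl'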
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/file/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/file/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,188 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ..common._common_conversion import (
-    _to_str,
-)
-from ._constants import X_MS_VERSION
-
-
-class FileSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating file and share access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_file(self, share_name, directory_name=None, file_name=None,
-                      permission=None, expiry=None, start=None, id=None,
-                      ip=None, protocol=None, cache_control=None,
-                      content_disposition=None, content_encoding=None,
-                      content_language=None, content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = share_name
-        if directory_name is not None:
-            resource_path += '/' + _to_str(directory_name)
-        resource_path += '/' + _to_str(file_name)
-
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('f')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
-
-        return sas.get_token()
-
-    def generate_share(self, share_name, permission=None, expiry=None,
-                       start=None, id=None, ip=None, protocol=None,
-                       cache_control=None, content_disposition=None,
-                       content_encoding=None, content_language=None,
-                       content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_share_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('s')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
-
-        return sas.get_token()
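A hedged sketch of driving the removed SAS factory; the account key is a
placeholder, and per the docstrings a naive expiry datetime is treated as UTC:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_11_09.file.models import FilePermissions
    from azure.multiapi.storage.v2017_11_09.file.sharedaccesssignature import (
        FileSharedAccessSignature,
    )

    sas = FileSharedAccessSignature('myaccount', '<account-key>')
    token = sas.generate_file(
        share_name='myshare',
        directory_name='docs',
        file_name='report.txt',
        permission=FilePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    # 'token' is the query string passed to FileService as sas_token.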
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/__init__.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .models import (
-    Queue,
-    QueueMessage,
-    QueuePermissions,
-    QueueMessageFormat,
-)
-
-from .queueservice import QueueService
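Each x-ms-version lives in its own subpackage under the multiapi layout, so
consumers pin a version by import path; the deletion above removes this
surface:

    # Stops working once the v2017_11_09 queue package is deleted;
    # account name and key are placeholders.
    from azure.multiapi.storage.v2017_11_09.queue import QueueService

    service = QueueService(account_name='myaccount', account_key='<key>')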
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_constants.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '1.2.0rc1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2017-11-09'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,150 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    Queue,
-    QueueMessage,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _to_int,
-    _parse_metadata,
-)
-from ._encryption import (
-    _decrypt_queue_message,
-)
-
-
-def _parse_metadata_and_message_count(response):
-    '''
-    Extracts approximate messages count header.
-    '''
-    metadata = _parse_metadata(response)
-    metadata.approximate_message_count = _to_int(response.headers.get('x-ms-approximate-messages-count'))
-
-    return metadata
-
-
-def _parse_queue_message_from_headers(response):
-    '''
-    Extracts pop receipt and time next visible from headers.
-    '''
-    message = QueueMessage()
-    message.pop_receipt = response.headers.get('x-ms-popreceipt')
-    message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible'))
-
-    return message
-
-
-def _convert_xml_to_queues(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Queues>
-        <Queue>
-          <Name>string-value</Name>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Queue>
-      </Queues>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    queues = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(queues, 'next_marker', next_marker)
-
-    queues_element = list_element.find('Queues')
-
-    for queue_element in queues_element.findall('Queue'):
-        # Name element
-        queue = Queue()
-        queue.name = queue_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = queue_element.find('Metadata')
-        if metadata_root_element is not None:
-            queue.metadata = dict()
-            for metadata_element in metadata_root_element:
-                queue.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add queue to list
-        queues.append(queue)
-
-    return queues
-
-
-def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver,
-                                   content=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessagesList>
-        <QueueMessage>
-          <MessageId>string-message-id</MessageId>
-          <InsertionTime>insertion-time</InsertionTime>
-          <ExpirationTime>expiration-time</ExpirationTime>
-          <PopReceipt>opaque-string-receipt-data</PopReceipt>
-          <TimeNextVisible>time-next-visible</TimeNextVisible>
-          <DequeueCount>integer</DequeueCount>
-          <MessageText>message-body</MessageText>
-        </QueueMessage>
-    </QueueMessagesList>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    messages = list()
-    list_element = ETree.fromstring(response.body)
-
-    for message_element in list_element.findall('QueueMessage'):
-        message = QueueMessage()
-
-        message.id = message_element.findtext('MessageId')
-
-        dequeue_count = message_element.findtext('DequeueCount')
-        if dequeue_count is not None:
-            message.dequeue_count = _to_int(dequeue_count)
-
-        # content is not returned for put_message
-        if content is not None:
-            message.content = content
-        else:
-            message.content = message_element.findtext('MessageText')
-            if (key_encryption_key is not None) or (resolver is not None):
-                message.content = _decrypt_queue_message(message.content, require_encryption,
-                                                         key_encryption_key, resolver)
-            message.content = decode_function(message.content)
-
-        message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
-        message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
-
-        message.pop_receipt = message_element.findtext('PopReceipt')
-
-        time_next_visible = message_element.find('TimeNextVisible')
-        if time_next_visible is not None:
-            message.time_next_visible = parser.parse(time_next_visible.text)
-
-        # Add message to list
-        messages.append(message)
-
-    return messages
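The docstring XML samples in the removed deserializer double as format
documentation; a standalone sketch of the same ElementTree parsing over an
invented QueueMessagesList payload:

    from xml.etree import ElementTree as ETree

    body = '''<?xml version="1.0" encoding="utf-8"?>
    <QueueMessagesList>
        <QueueMessage>
          <MessageId>abc-123</MessageId>
          <DequeueCount>1</DequeueCount>
          <MessageText>hello</MessageText>
        </QueueMessage>
    </QueueMessagesList>'''

    root = ETree.fromstring(body)
    for element in root.findall('QueueMessage'):
        print(element.findtext('MessageId'),
              element.findtext('DequeueCount'),
              element.findtext('MessageText'))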
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_encryption.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from json import (
-    dumps,
-    loads,
-)
-
-from azure.common import (
-    AzureException,
-)
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes
-)
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _dict_to_encryption_data,
-    _generate_AES_CBC_cipher,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-)
-from ._error import (
-    _ERROR_MESSAGE_NOT_ENCRYPTED
-)
-
-
-def _encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should 
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted, and so was not encrypted; alternatively,
-        # the user provided a json formatted message without encryption metadata.
-        if require_encryption:
-            raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED)
-        else:
-            return message
-    try:
-        return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception:
-        raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-
-def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_error.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-from ..common._error import (
-    _validate_type_bytes,
-)
-
-_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
-_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
-_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
-_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.'
-
-def _validate_message_type_text(param):
-    if sys.version_info < (3,):
-        if not isinstance(param, unicode):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)
-    else:
-        if not isinstance(param, str):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
-
-
-def _validate_message_type_bytes(param):
-    _validate_type_bytes('message', param)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_serialization.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,73 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    try:
-        from cStringIO import StringIO as BytesIO
-    except ImportError:
-        from StringIO import StringIO as BytesIO
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ..common._common_conversion import (
-    _str,
-)
-from ._encryption import (
-    _encrypt_queue_message,
-)
-
-
-def _get_path(queue_name=None, include_messages=None, message_id=None):
-    '''
-    Creates the path to access a queue resource.
-
-    queue_name:
-        Name of queue.
-    include_messages:
-        Whether or not to include messages.
-    message_id:
-        Message id.
-    '''
-    if queue_name and include_messages and message_id:
-        return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
-    if queue_name and include_messages:
-        return '/{0}/messages'.format(_str(queue_name))
-    elif queue_name:
-        return '/{0}'.format(_str(queue_name))
-    else:
-        return '/'
-
-
-def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessage>
-        <MessageText></MessageText>
-    </QueueMessage>
-    '''
-    queue_message_element = ETree.Element('QueueMessage')
-
-    # Encode the message text, then encrypt it if a kek was provided.
-    message_text = encode_function(message_text)
-    if key_encryption_key is not None:
-        message_text = _encrypt_queue_message(message_text, key_encryption_key)
-    ETree.SubElement(queue_message_element, 'MessageText').text = message_text
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/models.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/models.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,239 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from base64 import (
-    b64encode,
-    b64decode,
-)
-from xml.sax.saxutils import escape as xml_escape
-from xml.sax.saxutils import unescape as xml_unescape
-
-from ._error import (
-    _validate_message_type_bytes,
-    _validate_message_type_text,
-    _ERROR_MESSAGE_NOT_BASE64,
-)
-
-
-class Queue(object):
-    '''
-    Queue class.
-     
-    :ivar str name: 
-        The name of the queue.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the queue as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list queues operation. If this parameter was specified but the 
-        queue has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self):
-        self.name = None
-        self.metadata = None
-
-
-class QueueMessage(object):
-    ''' 
-    Queue message class. 
-
-    :ivar str id: 
-        A GUID value assigned to the message by the Queue service that 
-        identifies the message in the queue. This value may be used together 
-        with the value of pop_receipt to delete a message from the queue after 
-        it has been retrieved with the get messages operation. 
-    :ivar date insertion_time: 
-        A UTC date value representing the time the message was inserted.
-    :ivar date expiration_time: 
-        A UTC date value representing the time the message expires.
-    :ivar int dequeue_count: 
-        Begins with a value of 1 the first time the message is dequeued. This 
-        value is incremented each time the message is subsequently dequeued.
-    :ivar obj content: 
-        The message content. Type is determined by the decode_function set on 
-        the service. Default is str.
-    :ivar str pop_receipt: 
-        A receipt str which can be used together with the message_id element to 
-        delete a message from the queue after it has been retrieved with the get 
-        messages operation. Only returned by get messages operations. Set to 
-        None for peek messages.
-    :ivar date time_next_visible: 
-        A UTC date value representing the time the message will next be visible. 
-        Only returned by get messages operations. Set to None for peek messages.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.insertion_time = None
-        self.expiration_time = None
-        self.dequeue_count = None
-        self.content = None
-        self.pop_receipt = None
-        self.time_next_visible = None
-
-
-class QueueMessageFormat:
-    ''' 
-    Encoding and decoding methods which can be used to modify how the queue service 
-    encodes and decodes queue messages. Set these to queueservice.encode_function 
-    and queueservice.decode_function to modify the behavior. The defaults are 
-    text_xmlencode and text_xmldecode, respectively.
-    '''
-
-    @staticmethod
-    def text_base64encode(data):
-        '''
-        Base64 encode unicode text.
-        
-        :param str data: String to encode.
-        :return: Base64 encoded string.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return b64encode(data.encode('utf-8')).decode('utf-8')
-
-    @staticmethod
-    def text_base64decode(data):
-        '''
-        Base64 decode to unicode text.
-        
-        :param str data: String data to decode to unicode.
-        :return: Base64 decoded string.
-        :rtype: str
-        '''
-        try:
-            return b64decode(data.encode('utf-8')).decode('utf-8')
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def binary_base64encode(data):
-        '''
-        Base64 encode byte strings.
-        
-        :param str data: Binary string to encode.
-        :return: Base64 encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_bytes(data)
-        return b64encode(data).decode('utf-8')
-
-    @staticmethod
-    def binary_base64decode(data):
-        '''
-        Base64 decode to byte string.
-        
-        :param str data: Data to decode to a byte string.
-        :return: Base64 decoded data.
-        :rtype: bytes
-        '''
-        try:
-            return b64decode(data.encode('utf-8'))
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def text_xmlencode(data):
-        ''' 
-        XML encode unicode text.
-
-        :param str data: Unicode string to encode
-        :return: XML encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return xml_escape(data)
-
-    @staticmethod
-    def text_xmldecode(data):
-        ''' 
-        XML decode to unicode text.
-
-        :param str data: Data to decode to unicode.
-        :return: XML decoded data.
-        :rtype: str
-        '''
-        return xml_unescape(data)
-
-    @staticmethod
-    def noencode(data):
-        ''' 
-        Do no encoding. 
-
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str
-        '''
-        return data
-
-    @staticmethod
-    def nodecode(data):
-        '''
-        Do no decoding.
-        
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str        
-        '''
-        return data
-
-
-class QueuePermissions(object):
-    '''
-    QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature`
-    method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. 
-
-    :ivar QueuePermissions QueuePermissions.READ: 
-        Read metadata and properties, including message count. Peek at messages. 
-    :ivar QueuePermissions QueuePermissions.ADD: 
-        Add messages to the queue.
-    :ivar QueuePermissions QueuePermissions.UPDATE:
-        Update messages in the queue. Note: Use the Process permission with 
-        Update so you can first get the message you want to update.
-    :ivar QueuePermissions QueuePermissions.PROCESS:
-        Get and delete messages from the queue.
-    '''
-
-    def __init__(self, read=False, add=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Read metadata and properties, including message count. Peek at messages.
-        :param bool add:
-            Add messages to the queue.
-        :param bool update:
-            Update messages in the queue. Note: Use the Process permission with 
-            Update so you can first get the message you want to update.
-        :param bool process: 
-            Get and delete messages from the queue.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-QueuePermissions.READ = QueuePermissions(read=True)
-QueuePermissions.ADD = QueuePermissions(add=True)
-QueuePermissions.UPDATE = QueuePermissions(update=True)
-QueuePermissions.PROCESS = QueuePermissions(process=True)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/queueservice.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/queueservice.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/queueservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/queueservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,995 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.common import (
-    AzureConflictHttpError,
-    AzureHttpError,
-)
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _convert_xml_to_service_stats,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_CONFLICT,
-    _ERROR_STORAGE_MISSING_INFO,
-    _validate_access_policies,
-    _validate_encryption_required,
-    _validate_decryption_required,
-)
-from ..common._http import (
-    HTTPRequest,
-)
-from ..common._serialization import (
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from ..common._serialization import (
-    _get_request_body,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    QueueSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_queues,
-    _convert_xml_to_queue_messages,
-    _parse_queue_message_from_headers,
-    _parse_metadata_and_message_count,
-)
-from ._serialization import (
-    _convert_queue_message_xml,
-    _get_path,
-)
-from .models import (
-    QueueMessageFormat,
-)
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-_HTTP_RESPONSE_NO_CONTENT = 204
-
-
-class QueueService(StorageClient):
-    '''
-    This is the main class managing queue resources.
-
-    The Queue service stores messages. A queue can contain an unlimited number of 
-    messages, each of which can be up to 64KB in size. Messages are generally added 
-    to the end of the queue and retrieved from the front of the queue, although 
-    first in, first out (FIFO) behavior is not guaranteed.
-
-    :ivar function(data) encode_function: 
-        A function used to encode queue messages. Takes as 
-        a parameter the data passed to the put_message API and returns the encoded 
-        message. Defaults to taking text and xml-encoding it, but bytes and other 
-        encodings can be used. For example, base64 may be preferable for developing 
-        across multiple Azure Storage libraries in different languages. See the 
-        :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and 
-        no encoding methods as well as binary equivalents.
-    :ivar function(data) decode_function: 
-        A function used to decode queue messages. Takes as 
-        a parameter the data returned by the get_messages and peek_messages APIs and 
-        returns the decoded message. Defaults to xml-decoding and returning text, but 
-        bytes and other decodings can be used. For example, base64 may be preferable 
-        for developing across multiple Azure Storage libraries in different languages. 
-        See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 
-        and no decoding methods as well as binary equivalents.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and
-        successfully read from the queue are/were encrypted while on the server. If this flag is set, all required 
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'queue',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            token_credential=token_credential,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(QueueService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-                self.is_emulated
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        elif self.token_credential:
-            self.authentication = self.token_credential
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        self.encode_function = QueueMessageFormat.text_xmlencode
-        self.decode_function = QueueMessageFormat.text_xmldecode
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self.require_encryption = False
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue service.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.QUEUE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_queue_shared_access_signature(self, queue_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None, protocol=None, ):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            The name of the queue to create a SAS token for.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_queue_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_queue(
-            queue_name,
-            permission=permission,
-            expiry=expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-        )
-
-    def get_queue_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Queue service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durable 
-        in two locations. In both locations, Azure Storage constantly maintains 
-        multiple healthy replicas of your data. The location where you read, 
-        create, update, or delete data is the primary storage account location. 
-        The primary location exists in the region you choose at the time you 
-        create an account via the Azure Management Azure classic portal, for 
-        example, North Central US. The location to which your data is replicated 
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The queue service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
-    def get_queue_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Queue service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The queue service properties.
-        :rtype: :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def set_queue_service_properties(self, logging=None, hour_metrics=None,
-                                     minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for queues.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for queues.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-        self._perform_request(request)
-
-    def list_queues(self, prefix=None, num_results=None, include_metadata=False,
-                    marker=None, timeout=None):
-        '''
-        Returns a generator to list the queues. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all queues 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        queues, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param int num_results:
-            The maximum number of queues to return.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include,
-                  'marker': marker, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_queues(**kwargs)
-
-        return ListGenerator(resp, self._list_queues, (), kwargs)
-
-    def _list_queues(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of queues under the specified account. Makes a single list 
-        request to the service. Used internally by the list_queues method.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param str marker:
-            A token which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            queues. The marker value is opaque to the client.
-        :param int max_results:
-            The maximum number of queues to return. A single list request may 
-            return up to 1000 queues and potentially a continuation token which 
-            should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the queue's
-            metadata be returned as part of the response body.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queues, operation_context=_context)
-
-    def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a queue under the given account.
-
-        :param str queue_name:
-            The name of the queue to create. A queue name must be from 3 through 
-            63 characters long and may only contain lowercase letters, numbers, 
-            and the dash (-) character. The first and last letters in the queue 
-            must be alphanumeric. The dash (-) character cannot be the first or 
-            last character. Consecutive dash characters are not permitted in the 
-            queue name.
-        :param metadata:
-            A dict containing name-value pairs to associate with the queue as 
-            metadata. Note that metadata names preserve the case with which they 
-            were created, but are case-insensitive when set or read. 
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the queue already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        _add_metadata_headers(metadata, request)
-
-        def _return_request(request):
-            return request
-
-        if not fail_on_exist:
-            try:
-                response = self._perform_request(request, parser=_return_request)
-                if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                    return False
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            response = self._perform_request(request, parser=_return_request)
-            if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                raise AzureConflictHttpError(
-                    _ERROR_CONFLICT.format(response.message), response.status)
-            return True
-
-    def delete_queue(self, queue_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The queue is later removed from 
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the queue while it is being deleted, 
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str queue_name:
-            The name of the queue to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the queue doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        if not fail_not_exist:
-            try:
-                self._perform_request(request)
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_queue_metadata(self, queue_name, timeout=None):
-        '''
-        Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A dictionary representing the queue metadata with an 
-            approximate_message_count int property on the dict estimating the 
-            number of messages in the queue.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _parse_metadata_and_message_count)
-
-    def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param dict metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def exists(self, queue_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the queue exists.
-
-        :param str queue_name:
-            The name of queue to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the queue exists.
-        :rtype: bool
-        '''
-        try:
-            self.get_queue_metadata(queue_name, timeout=timeout)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def get_queue_acl(self, queue_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the queue that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a queue, the existing permissions are replaced. 
-        To update the queue's permissions, call :func:`~get_queue_acl` to fetch 
-        all access policies associated with the queue, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the queue. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-        self._perform_request(request)
-
-    def put_message(self, queue_name, content, visibility_timeout=None,
-                    time_to_live=None, timeout=None):
-        '''
-        Adds a new message to the back of the message queue. 
-
-        The visibility timeout specifies the time that the message will be 
-        invisible. After the timeout expires, the message will become visible. 
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the 
-        queue. The message will be deleted from the queue when the time-to-live 
-        period expires.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue to put the message into.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str. The encoded message can be up to 
-            64KB in size.
-        :param int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :param int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The time-to-live may be any positive number or -1 for infinity. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.models.QueueMessage` object.
-            This object is also populated with the content although it is not
-            returned from the service.
-        :rtype: :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('content', content)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'messagettl': _to_str(time_to_live),
-            'timeout': _int_to_str(timeout)
-        }
-
-        request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                    self.key_encryption_key))
-
-        message_list = self._perform_request(request, _convert_xml_to_queue_messages,
-                                             [self.decode_function, False,
-                                              None, None, content])
-        return message_list[0]
-
-    def get_messages(self, queue_name, num_messages=None,
-                     visibility_timeout=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message 
-        content and a pop_receipt value, which is required to delete the message. 
-        The message is not automatically deleted from the queue, but after it has 
-        been retrieved, it is not visible to other clients for the time interval 
-        specified by the visibility_timeout parameter.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to get messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds, relative
-            to server time. The new value must be larger than or equal to 1
-            second, and cannot be larger than 7 days. The visibility timeout of 
-            a message can be set to a value later than the expiry time.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects retrieved from the queue.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'numofmessages': _to_str(num_messages),
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def peek_messages(self, queue_name, num_messages=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved 
-        for the first time with a call to get_messages, its dequeue_count property 
-        is set to 1. If it is not deleted and is subsequently retrieved again, the 
-        dequeue_count property is incremented. The client may use this value to 
-        determine how many times a message has been retrieved. Note that a call 
-        to peek_messages does not increment the value of dequeue_count, but returns 
-        this value for the client to read.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to peek messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that 
-            time_next_visible and pop_receipt will not be populated as peek does 
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'peekonly': 'true',
-            'numofmessages': _to_str(num_messages),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
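
As a quick illustration of the peek semantics above, the following sketch assumes a configured track1 queue client exposing these methods (here called queue_service) and a queue named 'tasks'; both names are hypothetical. A peek returns content and dequeue_count but never a pop receipt:

    # Sketch only: 'queue_service' and the 'tasks' queue are assumptions, not part of this module.
    peeked = queue_service.peek_messages('tasks', num_messages=5)
    for msg in peeked:
        # dequeue_count counts prior get_messages retrievals; a peek does not increment it.
        print(msg.id, msg.dequeue_count, msg.content)
        # msg.pop_receipt and msg.time_next_visible remain unpopulated after a peek.
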
-    def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
-        '''
-        Deletes the specified message.
-
-        Normally after a client retrieves a message with the get_messages operation, 
-        the client is expected to process and delete the message. To delete the 
-        message, you must have two items of data: id and pop_receipt. The 
-        id is returned from the previous get_messages operation. The 
-        pop_receipt is returned from the most recent :func:`~get_messages` or 
-        :func:`~update_message` operation. In order for the delete_message operation 
-        to succeed, the pop_receipt specified on the request must match the 
-        pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` 
-        operation. 
-
-        :param str queue_name:
-            The name of the queue from which to delete the message.
-        :param str message_id:
-            The message id identifying the message to delete.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'timeout': _int_to_str(timeout)
-        }
-        self._perform_request(request)
-
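
To make the retrieve-then-delete contract concrete: each message returned by get_messages carries the id and pop_receipt that delete_message requires. A minimal sketch under the same assumptions (hypothetical queue_service client and 'tasks' queue; process() is a placeholder):

    # Sketch only: receive up to 16 messages, hide them for 30 seconds, delete when done.
    messages = queue_service.get_messages('tasks', num_messages=16, visibility_timeout=30)
    for msg in messages:
        process(msg.content)  # placeholder for application logic
        # Deleting needs both identifiers returned by get_messages.
        queue_service.delete_message('tasks', msg.id, msg.pop_receipt)
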
-    def clear_messages(self, queue_name, timeout=None):
-        '''
-        Deletes all messages from the specified queue.
-
-        :param str queue_name:
-            The name of the queue whose messages to clear.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {'timeout': _int_to_str(timeout)}
-        self._perform_request(request)
-
-    def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout,
-                       content=None, timeout=None):
-        '''
-        Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a 
-        queue message. This functionality can be useful if you want a worker role 
-        to "lease" a queue message. For example, if a worker role calls get_messages 
-        and recognizes that it needs more time to process a message, it can 
-        continually extend the message's invisibility until it is processed. If 
-        the worker role were to fail during processing, eventually the message 
-        would become visible again and another worker role could process it.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue containing the message to update.
-        :param str message_id:
-            The message id identifying the message to update.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A :class:`~azure.storage.queue.models.QueueMessage` object. For convenience,
-            this object is also populated with the content, although it is not returned by the service.
-        :rtype: :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        _validate_not_none('visibility_timeout', visibility_timeout)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'visibilitytimeout': _int_to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        if content is not None:
-            request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                        self.key_encryption_key))
-
-        return self._perform_request(request, _parse_queue_message_from_headers)
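
The "lease" pattern described in the update_message docstring could look like the sketch below (same hypothetical queue_service; still_working() is a placeholder). Each successful update returns a fresh pop receipt that must be used for the next call:

    # Sketch only: keep one message invisible while a long-running worker holds it.
    msg = queue_service.get_messages('tasks', visibility_timeout=30)[0]
    pop_receipt = msg.pop_receipt
    while still_working():
        updated = queue_service.update_message('tasks', msg.id, pop_receipt,
                                               visibility_timeout=30)
        pop_receipt = updated.pop_receipt  # refresh the receipt after every update
    queue_service.delete_message('tasks', msg.id, pop_receipt)
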
diff -pruN 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2017_11_09/queue/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2017_11_09/queue/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,81 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-)
-from ._constants import X_MS_VERSION
-
-
-class QueueSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating queue shared access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_queue(self, queue_name, permission=None,
-                       expiry=None, start=None, id=None,
-                       ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            Name of queue.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, add, update, process.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_queue_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
-
-        return sas.get_token()
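
For illustration, minting a read+process token with this factory might look like the sketch below; the account name, key, and expiry are placeholders, and QueuePermissions is assumed to come from the package's queue models. The resulting token string is what QueueService accepts as sas_token:

    # Sketch only: generate a queue SAS under assumed credentials.
    from datetime import datetime, timedelta

    sas = QueueSharedAccessSignature('myaccount', '<account-key>')  # placeholders
    token = sas.generate_queue(
        'tasks',
        permission=QueuePermissions(read=True, process=True),  # assumed models import
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
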
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,31 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .appendblobservice import AppendBlobService
-from .blockblobservice import BlockBlobService
-from .models import (
-    Container,
-    ContainerProperties,
-    Blob,
-    BlobProperties,
-    BlobBlock,
-    BlobBlockList,
-    PageRange,
-    ContentSettings,
-    CopyProperties,
-    ContainerPermissions,
-    BlobPermissions,
-    _LeaseActions,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    Include,
-    SequenceNumberAction,
-    BlockListType,
-    PublicAccess,
-    BlobPrefix,
-    DeleteSnapshot,
-)
-from .pageblobservice import PageBlobService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_constants.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '2.0.1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2018-11-09'
-
-# internal configurations, should not be changed
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,556 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.common import AzureException
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _decode_base64_to_text,
-    _to_str,
-    _get_content_md5
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _to_int,
-    _parse_metadata,
-    _convert_xml_to_signed_identifiers,
-    _bool,
-)
-from .models import (
-    Container,
-    Blob,
-    BlobBlock,
-    BlobBlockList,
-    BlobBlockState,
-    BlobProperties,
-    PageRange,
-    ContainerProperties,
-    AppendBlockProperties,
-    PageBlobProperties,
-    ResourceProperties,
-    BlobPrefix,
-    AccountInformation,
-    UserDelegationKey,
-)
-from ._encryption import _decrypt_blob
-from ..common.models import _list
-from ..common._error import (
-    _validate_content_match,
-    _ERROR_DECRYPTION_FAILURE,
-)
-
-
-def _parse_base_properties(response):
-    '''
-    Extracts basic response headers.
-    '''
-    resource_properties = ResourceProperties()
-    resource_properties.last_modified = parser.parse(response.headers.get('last-modified'))
-    resource_properties.etag = response.headers.get('etag')
-
-    return resource_properties
-
-
-def _parse_page_properties(response):
-    '''
-    Extracts page response headers.
-    '''
-    put_page = PageBlobProperties()
-    put_page.last_modified = parser.parse(response.headers.get('last-modified'))
-    put_page.etag = response.headers.get('etag')
-    put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number'))
-
-    return put_page
-
-
-def _parse_append_block(response):
-    '''
-    Extracts append block response headers.
-    '''
-    append_block = AppendBlockProperties()
-    append_block.last_modified = parser.parse(response.headers.get('last-modified'))
-    append_block.etag = response.headers.get('etag')
-    append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset'))
-    append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count'))
-
-    return append_block
-
-
-def _parse_snapshot_blob(response, name):
-    '''
-    Extracts snapshot return header.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_blob(response, name, snapshot)
-
-
-def _parse_lease(response):
-    '''
-    Extracts lease time and ID return headers.
-    '''
-    lease = {'time': response.headers.get('x-ms-lease-time')}
-    if lease['time']:
-        lease['time'] = _to_int(lease['time'])
-
-    lease['id'] = response.headers.get('x-ms-lease-id')
-
-    return lease
-
-
-def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
-                key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, BlobProperties)
-
-    # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
-    content_settings = getattr(props, 'content_settings')
-    if 'content-range' in response.headers:
-        if 'x-ms-blob-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    if key_encryption_key is not None or key_resolver_function is not None:
-        try:
-            response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
-                                          response, start_offset, end_offset)
-        except Exception:
-            raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-    return Blob(name, snapshot, response.body, props, metadata)
-
-
-def _parse_container(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ContainerProperties)
-    return Container(name, props, metadata)
-
-
-def _convert_xml_to_signed_identifiers_and_access(response):
-    acl = _convert_xml_to_signed_identifiers(response)
-    acl.public_access = response.headers.get('x-ms-blob-public-access')
-
-    return acl
-
-
-def _convert_xml_to_containers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Containers>
-        <Container>
-          <Name>container-name</Name>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <LeaseStatus>locked | unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <PublicAccess>blob | container</PublicAccess>
-            <HasImmutabilityPolicy>true | false</HasImmutabilityPolicy>
-            <HasLegalHold>true | false</HasLegalHold>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Container>
-      </Containers>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    containers = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
-
-    containers_element = list_element.find('Containers')
-
-    for container_element in containers_element.findall('Container'):
-        # Name element
-        container = Container()
-        container.name = container_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = container_element.find('Metadata')
-        if metadata_root_element is not None:
-            container.metadata = dict()
-            for metadata_element in metadata_root_element:
-                container.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = container_element.find('Properties')
-        container.properties.etag = properties_element.findtext('Etag')
-        container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        container.properties.lease_status = properties_element.findtext('LeaseStatus')
-        container.properties.lease_state = properties_element.findtext('LeaseState')
-        container.properties.lease_duration = properties_element.findtext('LeaseDuration')
-        container.properties.public_access = properties_element.findtext('PublicAccess')
-        container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy')
-        container.properties.has_legal_hold = properties_element.findtext('HasLegalHold')
-
-        # Add container to list
-        containers.append(container)
-
-    return containers
-
-
-LIST_BLOBS_ATTRIBUTE_MAP = {
-    'Last-Modified': (None, 'last_modified', parser.parse),
-    'Etag': (None, 'etag', _to_str),
-    'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int),
-    'BlobType': (None, 'blob_type', _to_str),
-    'Content-Length': (None, 'content_length', _to_int),
-    'ServerEncrypted': (None, 'server_encrypted', _bool),
-    'Content-Type': ('content_settings', 'content_type', _to_str),
-    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
-    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
-    'Content-Language': ('content_settings', 'content_language', _to_str),
-    'Content-MD5': ('content_settings', 'content_md5', _to_str),
-    'Cache-Control': ('content_settings', 'cache_control', _to_str),
-    'LeaseStatus': ('lease', 'status', _to_str),
-    'LeaseState': ('lease', 'state', _to_str),
-    'LeaseDuration': ('lease', 'duration', _to_str),
-    'CopyId': ('copy', 'id', _to_str),
-    'CopySource': ('copy', 'source', _to_str),
-    'CopyStatus': ('copy', 'status', _to_str),
-    'CopyProgress': ('copy', 'progress', _to_str),
-    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
-    'CopyStatusDescription': ('copy', 'status_description', _to_str),
-    'AccessTier': (None, 'blob_tier', _to_str),
-    'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
-    'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
-    'ArchiveStatus': (None, 'rehydration_status', _to_str),
-    'DeletedTime': (None, 'deleted_time', parser.parse),
-    'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int),
-    'Creation-Time': (None, 'creation_time', parser.parse),
-}
-
-
-def _convert_xml_to_blob_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Delimiter>string-value</Delimiter>
-      <Blobs>
-        <Blob>
-          <Name>blob-name</Name>
-          <Deleted>true</Deleted>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date-time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Content-Length>size-in-bytes</Content-Length>
-            <Content-Type>blob-content-type</Content-Type>
-            <Content-Encoding />
-            <Content-Language />
-            <Content-MD5 />
-            <Cache-Control />
-            <x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
-            <BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
-            <LeaseStatus>locked|unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <CopyId>id</CopyId>
-            <CopyStatus>pending | success | aborted | failed </CopyStatus>
-            <CopySource>source url</CopySource>
-            <CopyProgress>bytes copied/bytes total</CopyProgress>
-            <CopyCompletionTime>datetime</CopyCompletionTime>
-            <CopyStatusDescription>error string</CopyStatusDescription>
-            <AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
-            <AccessTierChangeTime>date-time-value</AccessTierChangeTime>
-            <AccessTierInferred>true</AccessTierInferred>
-            <DeletedTime>datetime</DeletedTime>
-            <RemainingRetentionDays>int</RemainingRetentionDays>
-            <Creation-Time>date-time-value</Creation-Time>
-          </Properties>
-          <Metadata>   
-            <Name>value</Name>
-          </Metadata>
-        </Blob>
-        <BlobPrefix>
-          <Name>blob-prefix</Name>
-        </BlobPrefix>
-      </Blobs>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    blob_list = _list()
-    list_element = ETree.fromstring(response.body)
-
-    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
-
-    blobs_element = list_element.find('Blobs')
-    blob_prefix_elements = blobs_element.findall('BlobPrefix')
-    if blob_prefix_elements is not None:
-        for blob_prefix_element in blob_prefix_elements:
-            prefix = BlobPrefix()
-            prefix.name = blob_prefix_element.findtext('Name')
-            blob_list.append(prefix)
-
-    for blob_element in blobs_element.findall('Blob'):
-        blob = Blob()
-        blob.name = blob_element.findtext('Name')
-        blob.snapshot = blob_element.findtext('Snapshot')
-
-        deleted = blob_element.findtext('Deleted')
-        if deleted:
-            blob.deleted = _bool(deleted)
-
-        # Properties
-        properties_element = blob_element.find('Properties')
-        if properties_element is not None:
-            for property_element in properties_element:
-                info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
-                if info is None:
-                    setattr(blob.properties, property_element.tag, _to_str(property_element.text))
-                elif info[0] is None:
-                    setattr(blob.properties, info[1], info[2](property_element.text))
-                else:
-                    attr = getattr(blob.properties, info[0])
-                    setattr(attr, info[1], info[2](property_element.text))
-
-        # Metadata
-        metadata_root_element = blob_element.find('Metadata')
-        if metadata_root_element is not None:
-            blob.metadata = dict()
-            for metadata_element in metadata_root_element:
-                blob.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add blob to list
-        blob_list.append(blob)
-
-    return blob_list
-
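
The LIST_BLOBS_ATTRIBUTE_MAP above drives a small dispatch: each value is a (holder attribute, target attribute, parser) triple, where a None holder means "set directly on the properties object". A self-contained sketch of the same pattern, with toy classes standing in for the real models:

    # Sketch only: the (holder, attr, parser) dispatch used by _convert_xml_to_blob_list.
    from xml.etree import ElementTree as ETree

    class _Lease(object):
        pass

    class _Props(object):
        def __init__(self):
            self.lease = _Lease()

    ATTR_MAP = {
        'Content-Length': (None, 'content_length', int),
        'LeaseStatus': ('lease', 'status', str),
    }

    props = _Props()
    xml = ('<Properties><Content-Length>42</Content-Length>'
           '<LeaseStatus>unlocked</LeaseStatus></Properties>')
    for element in ETree.fromstring(xml):
        holder, attr, parse = ATTR_MAP.get(element.tag, (None, element.tag, str))
        target = props if holder is None else getattr(props, holder)
        setattr(target, attr, parse(element.text))

    assert props.content_length == 42 and props.lease.status == 'unlocked'
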
-
-def _convert_xml_to_blob_name_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Delimiter>string-value</Delimiter>
-      <Blobs>
-        <Blob>
-          <Name>blob-name</Name>
-          <Deleted>true</Deleted>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date-time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Content-Length>size-in-bytes</Content-Length>
-            <Content-Type>blob-content-type</Content-Type>
-            <Content-Encoding />
-            <Content-Language />
-            <Content-MD5 />
-            <Cache-Control />
-            <x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
-            <BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
-            <LeaseStatus>locked|unlocked</LeaseStatus>
-            <LeaseState>available | leased | expired | breaking | broken</LeaseState>
-            <LeaseDuration>infinite | fixed</LeaseDuration>
-            <CopyId>id</CopyId>
-            <CopyStatus>pending | success | aborted | failed </CopyStatus>
-            <CopySource>source url</CopySource>
-            <CopyProgress>bytes copied/bytes total</CopyProgress>
-            <CopyCompletionTime>datetime</CopyCompletionTime>
-            <CopyStatusDescription>error string</CopyStatusDescription>
-            <AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
-            <AccessTierChangeTime>date-time-value</AccessTierChangeTime>
-            <AccessTierInferred>true</AccessTierInferred>
-            <DeletedTime>datetime</DeletedTime>
-            <RemainingRetentionDays>int</RemainingRetentionDays>
-            <Creation-Time>date-time-value</Creation-Time>
-          </Properties>
-          <Metadata>   
-            <Name>value</Name>
-          </Metadata>
-        </Blob>
-        <BlobPrefix>
-          <Name>blob-prefix</Name>
-        </BlobPrefix>
-      </Blobs>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    blob_list = _list()
-    list_element = ETree.fromstring(response.body)
-
-    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
-
-    blobs_element = list_element.find('Blobs')
-    blob_prefix_elements = blobs_element.findall('BlobPrefix')
-    if blob_prefix_elements is not None:
-        for blob_prefix_element in blob_prefix_elements:
-            blob_list.append(blob_prefix_element.findtext('Name'))
-
-    for blob_element in blobs_element.findall('Blob'):
-        blob_list.append(blob_element.findtext('Name'))
-
-    return blob_list
-
-
-def _convert_xml_to_block_list(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <CommittedBlocks>
-         <Block>
-            <Name>base64-encoded-block-id</Name>
-            <Size>size-in-bytes</Size>
-         </Block>
-      </CommittedBlocks>
-      <UncommittedBlocks>
-        <Block>
-          <Name>base64-encoded-block-id</Name>
-          <Size>size-in-bytes</Size>
-        </Block>
-      </UncommittedBlocks>
-     </BlockList>
-
-    Converts xml response to block list class.
-    '''
-    if response is None or response.body is None:
-        return None
-
-    block_list = BlobBlockList()
-
-    list_element = ETree.fromstring(response.body)
-
-    committed_blocks_element = list_element.find('CommittedBlocks')
-    if committed_blocks_element is not None:
-        for block_element in committed_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
-            block._set_size(block_size)
-            block_list.committed_blocks.append(block)
-
-    uncommitted_blocks_element = list_element.find('UncommittedBlocks')
-    if uncommitted_blocks_element is not None:
-        for block_element in uncommitted_blocks_element.findall('Block'):
-            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
-            block_size = int(block_element.findtext('Size'))
-            block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
-            block._set_size(block_size)
-            block_list.uncommitted_blocks.append(block)
-
-    return block_list
-
-
-def _convert_xml_to_page_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <PageList>
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-       <ClearRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </ClearRange> 
-       <PageRange> 
-          <Start>Start Byte</Start> 
-          <End>End Byte</End> 
-       </PageRange> 
-    </PageList> 
-    '''
-    if response is None or response.body is None:
-        return None
-
-    page_list = list()
-
-    list_element = ETree.fromstring(response.body)
-
-    for page_range_element in list_element:
-        if page_range_element.tag == 'PageRange':
-            is_cleared = False
-        elif page_range_element.tag == 'ClearRange':
-            is_cleared = True
-        else:
-            continue  # ignore any unrecognized page range types
-
-        page_list.append(
-            PageRange(
-                int(page_range_element.findtext('Start')),
-                int(page_range_element.findtext('End')),
-                is_cleared
-            )
-        )
-
-    return page_list
-
-
-def _parse_account_information(response):
-    account_info = AccountInformation()
-    account_info.sku_name = response.headers['x-ms-sku-name']
-    account_info.account_kind = response.headers['x-ms-account-kind']
-
-    return account_info
-
-
-def _convert_xml_to_user_delegation_key(response):
-    """
-    <?xml version="1.0" encoding="utf-8"?>
-    <UserDelegationKey>
-        <SignedOid> Guid </SignedOid>
-        <SignedTid> Guid </SignedTid>
-        <SignedStart> String, formatted ISO Date </SignedStart>
-        <SignedExpiry> String, formatted ISO Date </SignedExpiry>
-        <SignedService>b</SignedService>
-        <SignedVersion> String, rest api version used to create delegation key </SignedVersion>
-        <Value>Ovg+o0K/0/2V8upg7AwlyAPCriEcOSXKuBu2Gv/PU70Y7aWDW3C2ZRmw6kYWqPWBaM1GosLkcSZkgsobAlT+Sw==</Value>
-    </UserDelegationKey>
-
-    Converts xml response to UserDelegationKey class.
-    """
-
-    if response is None or response.body is None:
-        return None
-
-    delegation_key = UserDelegationKey()
-
-    key_element = ETree.fromstring(response.body)
-    delegation_key.signed_oid = key_element.findtext('SignedOid')
-    delegation_key.signed_tid = key_element.findtext('SignedTid')
-    delegation_key.signed_start = key_element.findtext('SignedStart')
-    delegation_key.signed_expiry = key_element.findtext('SignedExpiry')
-    delegation_key.signed_service = key_element.findtext('SignedService')
-    delegation_key.signed_version = key_element.findtext('SignedVersion')
-    delegation_key.value = key_element.findtext('Value')
-
-    return delegation_key
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,178 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-
-def _download_blob_chunks(blob_service, container_name, blob_name, snapshot,
-                          download_size, block_size, progress, start_range, end_range,
-                          stream, max_connections, progress_callback, validate_content,
-                          lease_id, if_modified_since, if_unmodified_since, if_match,
-                          if_none_match, timeout, operation_context):
-
-    downloader_class = _ParallelBlobChunkDownloader if max_connections > 1 else _SequentialBlobChunkDownloader
-
-    downloader = downloader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        snapshot,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        lease_id,
-        if_modified_since,
-        if_unmodified_since,
-        if_match,
-        if_none_match,
-        timeout,
-        operation_context,
-    )
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-    else:
-        for chunk in downloader.get_chunk_offsets():
-            downloader.process_chunk(chunk)
-
-
-class _BlobChunkDownloader(object):
-    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
-                 chunk_size, progress, start_range, end_range, stream,
-                 progress_callback, validate_content, lease_id, if_modified_since,
-                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):
-        # identifiers for the blob
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.snapshot = snapshot
-
-        # information on the download range/chunk size
-        self.chunk_size = chunk_size
-        self.download_size = download_size
-        self.start_index = start_range
-        self.blob_end = end_range
-
-        # the destination that we will write to
-        self.stream = stream
-
-        # progress related
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-
-        # parameters for each get blob operation
-        self.timeout = timeout
-        self.operation_context = operation_context
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.if_modified_since = if_modified_since
-        self.if_unmodified_since = if_unmodified_since
-        self.if_match = if_match
-        self.if_none_match = if_none_match
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.blob_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.blob_end:
-            chunk_end = self.blob_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    # should be provided by the subclass
-    def _update_progress(self, length):
-        pass
-
-    # should be provided by the subclass
-    def _write_to_stream(self, chunk_data, chunk_start):
-        pass
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        response = self.blob_service._get_blob(
-            self.container_name,
-            self.blob_name,
-            snapshot=self.snapshot,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            if_modified_since=self.if_modified_since,
-            if_unmodified_since=self.if_unmodified_since,
-            if_match=self.if_match,
-            if_none_match=self.if_none_match,
-            timeout=self.timeout,
-            _context=self.operation_context
-        )
-
-        # This makes sure that if_match is set so that we can validate 
-        # that subsequent downloads are to an unmodified blob
-        self.if_match = response.properties.etag
-        return response
-
-
-class _ParallelBlobChunkDownloader(_BlobChunkDownloader):
-    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
-                 chunk_size, progress, start_range, end_range, stream,
-                 progress_callback, validate_content, lease_id, if_modified_since,
-                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):
-
-        super(_ParallelBlobChunkDownloader, self).__init__(blob_service, container_name, blob_name, snapshot,
-                                                           download_size,
-                                                           chunk_size, progress, start_range, end_range, stream,
-                                                           progress_callback, validate_content, lease_id,
-                                                           if_modified_since,
-                                                           if_unmodified_since, if_match, if_none_match, timeout,
-                                                           operation_context)
-
-        # for a parallel download, the stream is always seekable, so we note down the current position
-        # in order to seek to the right place when out-of-order chunks come in
-        self.stream_start = stream.tell()
-
-        # since parallel operations are going on
-        # it is essential to protect the writing and progress reporting operations
-        self.stream_lock = threading.Lock()
-        self.progress_lock = threading.Lock()
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total_so_far = self.progress_total
-            self.progress_callback(total_so_far, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-
-class _SequentialBlobChunkDownloader(_BlobChunkDownloader):
-    def __init__(self, *args):
-        super(_SequentialBlobChunkDownloader, self).__init__(*args)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            self.progress_total += length
-            self.progress_callback(self.progress_total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        # chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
-        self.stream.write(chunk_data)
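
The parallel/sequential split above reduces to: generate fixed-size offsets, fetch each chunk, and serialize seek+write behind a lock when a thread pool is in play. A standalone sketch of that shape, with an in-memory fetch() standing in for the ranged _get_blob call:

    # Sketch only: parallel chunked download into a seekable stream.
    import concurrent.futures
    import io
    import threading

    data = bytes(bytearray(range(256))) * 64     # pretend remote blob
    chunk_size = 1024
    stream = io.BytesIO(b'\x00' * len(data))
    lock = threading.Lock()

    def fetch(start, end):
        return data[start:end]                   # stand-in for a ranged _get_blob

    def process_chunk(start):
        end = min(start + chunk_size, len(data))
        chunk = fetch(start, end)
        with lock:                               # protect seek+write from interleaving
            stream.seek(start)
            stream.write(chunk)

    with concurrent.futures.ThreadPoolExecutor(4) as pool:
        list(pool.map(process_chunk, range(0, len(data), chunk_size)))

    assert stream.getvalue() == data
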
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_encryption.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,187 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from json import (
-    dumps,
-    loads,
-)
-from os import urandom
-
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _generate_AES_CBC_cipher,
-    _dict_to_encryption_data,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-    _ERROR_DATA_NOT_ENCRYPTED,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-)
-
-
-def _encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the _upload_blob_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
-
-def _generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-    
-    :param object key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the 
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                  response, start_offset, end_offset):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-    
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key 
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    _validate_not_none('response', response)
-    content = response.body
-    _validate_not_none('content', content)
-
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata']))
-    except Exception:
-        if require_encryption:
-            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
-
-        return content
-
-    if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    blob_type = response.headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    start_range, end_range = 0, len(content)
-    if 'content-range' in response.headers:
-        content_range = response.headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        start_range = int(content_range[0])
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def _get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
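
The scheme above is AES-256-CBC over the whole blob with PKCS7 (128-bit block) padding, the CEK and IV drawn from os.urandom. A minimal round trip with the same primitives, building the Cipher directly instead of going through the _generate_AES_CBC_cipher helper:

    # Sketch only: AES-256-CBC + PKCS7 round trip mirroring the FullBlob scheme.
    from os import urandom
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.primitives.padding import PKCS7

    cek, iv = urandom(32), urandom(16)  # content-encryption key and IV
    cipher = Cipher(algorithms.AES(cek), modes.CBC(iv), backend=default_backend())

    padder = PKCS7(128).padder()
    padded = padder.update(b'blob bytes') + padder.finalize()
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    decryptor = cipher.decryptor()
    unpadder = PKCS7(128).unpadder()
    plain = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize())
    plain += unpadder.finalize()
    assert plain == b'blob bytes'
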
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_error.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,29 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
-    'Invalid page blob size: {0}. ' + \
-    'The size must be aligned to a 512-byte boundary.'
-
-_ERROR_PAGE_BLOB_START_ALIGNMENT = \
-    'start_range must align with 512 page size'
-
-_ERROR_PAGE_BLOB_END_ALIGNMENT = \
-    'end_range must align with 512 page size'
-
-_ERROR_INVALID_BLOCK_ID = \
-    'All blocks in block list need to have valid block ids.'
-
-_ERROR_INVALID_LEASE_DURATION = \
-    "lease_duration param needs to be between 15 and 60 or -1."
-
-_ERROR_INVALID_LEASE_BREAK_PERIOD = \
-    "lease_break_period param needs to be between 0 and 60."
-
-_ERROR_NO_SINGLE_THREAD_CHUNKING = \
-    'To use blob chunk downloader more than 1 thread must be ' + \
-    'used since get_blob_to_bytes should be called for single threaded ' + \
-    'blob downloads.'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_serialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,153 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from xml.sax.saxutils import escape as xml_escape
-from datetime import date
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from ..common._common_conversion import (
-    _encode_base64,
-    _str,
-)
-from ..common._serialization import (
-    _to_utc_datetime,
-)
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-from ._error import (
-    _ERROR_PAGE_BLOB_START_ALIGNMENT,
-    _ERROR_PAGE_BLOB_END_ALIGNMENT,
-    _ERROR_INVALID_BLOCK_ID,
-)
-from io import BytesIO
-
-
-def _get_path(container_name=None, blob_name=None):
-    '''
-    Creates the path to access a blob resource.
-
-    container_name:
-        Name of container.
-    blob_name:
-        The path to the blob.
-    '''
-    if container_name and blob_name:
-        return '/{0}/{1}'.format(
-            _str(container_name),
-            _str(blob_name))
-    elif container_name:
-        return '/{0}'.format(_str(container_name))
-    else:
-        return '/'
-
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False, align_to_page=False,
-                                       range_header_name='x-ms-range'):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers[range_header_name] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers[range_header_name] = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
-
-
-def _convert_block_list_to_xml(block_id_list):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <BlockList>
-      <Committed>first-base64-encoded-block-id</Committed>
-      <Uncommitted>second-base64-encoded-block-id</Uncommitted>
-      <Latest>third-base64-encoded-block-id</Latest>
-    </BlockList>
-
-    Convert a block list to xml to send.
-
-    block_id_list:
-        A list of BlobBlock containing the block ids and block states that are used in put_block_list.
-        Each block id is written under the element that matches its state (Committed, Uncommitted, or Latest).
-    '''
-    if block_id_list is None:
-        return ''
-
-    block_list_element = ETree.Element('BlockList')
-
-    # Write each block id under the element named for its state
-    for block in block_id_list:
-        if block.id is None:
-            raise ValueError(_ERROR_INVALID_BLOCK_ID)
-        block_id = xml_escape(_str(_encode_base64(block.id)))
-        ETree.SubElement(block_list_element, block.state).text = block_id
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    # return xml value
-    return output
-
-
-def _convert_delegation_key_info_to_xml(start_time, expiry_time):
-    """
-    <?xml version="1.0" encoding="utf-8"?>
-    <KeyInfo>
-        <Start> String, formatted ISO Date </Start>
-        <Expiry> String, formatted ISO Date </Expiry>
-    </KeyInfo>
-
-    Convert key info to xml to send.
-    """
-    if start_time is None or expiry_time is None:
-        raise ValueError("delegation key start/end times are required")
-
-    key_info_element = ETree.Element('KeyInfo')
-    ETree.SubElement(key_info_element, 'Start').text = \
-        _to_utc_datetime(start_time) if isinstance(start_time, date) else start_time
-    ETree.SubElement(key_info_element, 'Expiry').text = \
-        _to_utc_datetime(expiry_time) if isinstance(expiry_time, date) else expiry_time
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(key_info_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    # return xml value
-    return output
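
Both serializers above emit a small XML document via ElementTree. Here is a self-contained sketch of the BlockList payload that _convert_block_list_to_xml produces, using only the standard library (BlobBlock here is an illustrative stand-in for the SDK model, not the removed class):

    import base64
    from io import BytesIO
    from xml.etree import ElementTree as ETree

    class BlobBlock(object):
        # illustrative stand-in: just an id and a state
        def __init__(self, id, state='Latest'):
            self.id = id
            self.state = state

    def block_list_xml(blocks):
        root = ETree.Element('BlockList')
        for block in blocks:
            # ids are base64-encoded before being placed under the state element
            encoded = base64.b64encode(block.id.encode('utf-8')).decode('ascii')
            ETree.SubElement(root, block.state).text = encoded
        stream = BytesIO()
        ETree.ElementTree(root).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
        return stream.getvalue()

    print(block_list_xml([BlobBlock('block-000'), BlobBlock('block-001')]))
    # b"<?xml version='1.0' encoding='utf-8'?>\n<BlockList><Latest>YmxvY2stMDAw</Latest><Latest>YmxvY2stMDAx</Latest></BlockList>"
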
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,496 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from threading import Lock
-
-from math import ceil
-
-from ..common._common_conversion import _encode_base64
-from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-from ..common._serialization import (
-    url_quote,
-    _get_data_bytes_only,
-    _len_plus
-)
-from ._constants import (
-    _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-)
-from ._encryption import (
-    _get_blob_encryptor_and_padder,
-)
-from .models import BlobBlock
-
-
-def _upload_blob_chunks(blob_service, container_name, blob_name,
-                        blob_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, lease_id, uploader_class,
-                        maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
-                        if_none_match=None, timeout=None,
-                        content_encryption_key=None, initialization_vector=None, resource_properties=None):
-    encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
-                                                       uploader_class is not _PageBlobChunkUploader)
-
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        encryptor,
-        padder
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # Access conditions do not work with parallelism
-    if max_connections > 1:
-        uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None
-    else:
-        uploader.if_match = if_match
-        uploader.if_none_match = if_none_match
-        uploader.if_modified_since = if_modified_since
-        uploader.if_unmodified_since = if_unmodified_since
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        from threading import BoundedSemaphore
-
-        '''
-        Bound the chunking so that we only buffer and submit 'max_connections' work items to the executor at a time.
-        This is necessary because the executor queue accepts submitted work items without limit, which would
-        otherwise buffer every block in memory. The + 1 ensures the next chunk is already buffered and ready
-        for when a worker thread becomes available.
-        '''
-        chunk_throttler = BoundedSemaphore(max_connections + 1)
-
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        futures = []
-        running_futures = []
-
-        # Check for exceptions from completed chunks and fail fast.
-        for chunk in uploader.get_chunk_streams():
-            # iterate over a copy; removing items while iterating the list itself skips elements
-            for f in list(running_futures):
-                if f.done():
-                    if f.exception():
-                        raise f.exception()
-                    running_futures.remove(f)
-
-            chunk_throttler.acquire()
-            future = executor.submit(uploader.process_chunk, chunk)
-
-            # Calls callback upon completion (even if the callback was added after the Future task is done).
-            future.add_done_callback(lambda x: chunk_throttler.release())
-            futures.append(future)
-            running_futures.append(future)
-
-        # result() will wait until completion and also raise any exceptions that may have been set.
-        range_ids = [f.result() for f in futures]
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-
-    if resource_properties:
-        resource_properties.last_modified = uploader.last_modified
-        resource_properties.etag = uploader.etag
-
-    return range_ids
-
-
-def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
-                                  blob_size, block_size, stream, max_connections,
-                                  progress_callback, validate_content, lease_id, uploader_class,
-                                  maxsize_condition=None, if_match=None, timeout=None):
-    uploader = uploader_class(
-        blob_service,
-        container_name,
-        blob_name,
-        blob_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        lease_id,
-        timeout,
-        None,
-        None
-    )
-
-    uploader.maxsize_condition = maxsize_condition
-
-    # ETag matching does not work with parallelism as a ranged upload may start
-    # before the previous finishes and provides an etag
-    uploader.if_match = if_match if max_connections <= 1 else None
-
-    if progress_callback is not None:
-        progress_callback(0, blob_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
-    else:
-        range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
-
-    return range_ids
-
-
-class _BlobChunkUploader(object):
-    def __init__(self, blob_service, container_name, blob_name, blob_size,
-                 chunk_size, stream, parallel, progress_callback,
-                 validate_content, lease_id, timeout, encryptor, padder):
-        self.blob_service = blob_service
-        self.container_name = container_name
-        self.blob_name = blob_name
-        self.blob_size = blob_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-        self.validate_content = validate_content
-        self.lease_id = lease_id
-        self.timeout = timeout
-        self.encryptor = encryptor
-        self.padder = padder
-        self.last_modified = None
-        self.etag = None
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.blob_size:
-                    read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                temp = _get_data_bytes_only('temp', temp)
-                data += temp
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if len(data) > 0:
-                    yield index, data
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.blob_size)
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.blob_size
-
-        if blob_length is None:
-            blob_length = _len_plus(self.stream)
-            if blob_length is None:
-                raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            yield ('BlockId{0:05d}'.format(i),
-                   _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
-                              lock))
-
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class _BlockBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
-        self.blob_service._put_block(
-            self.container_name,
-            self.blob_name,
-            chunk_data,
-            block_id,
-            validate_content=self.validate_content,
-            lease_id=self.lease_id,
-            timeout=self.timeout,
-        )
-        return BlobBlock(block_id)
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.blob_service._put_block(
-                self.container_name,
-                self.blob_name,
-                block_stream,
-                block_id,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                timeout=self.timeout,
-            )
-        finally:
-            block_stream.close()
-        return BlobBlock(block_id)
-
-
-class _PageBlobChunkUploader(_BlobChunkUploader):
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        for each_byte in chunk_data:
-            if each_byte != 0 and each_byte != b'\x00':
-                return False
-        return True
-
-    def _upload_chunk(self, chunk_start, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_start + len(chunk_data) - 1
-            resp = self.blob_service._update_page(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                chunk_start,
-                chunk_end,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                if_match=self.if_match,
-                timeout=self.timeout,
-            )
-
-            if not self.parallel:
-                self.if_match = resp.etag
-
-            self.set_response_properties(resp)
-
-
-class _AppendBlobChunkUploader(_BlobChunkUploader):
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if not hasattr(self, 'current_length'):
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                timeout=self.timeout,
-                if_modified_since=self.if_modified_since,
-                if_unmodified_since=self.if_unmodified_since,
-                if_match=self.if_match,
-                if_none_match=self.if_none_match
-            )
-
-            self.current_length = resp.append_offset
-        else:
-            resp = self.blob_service.append_block(
-                self.container_name,
-                self.blob_name,
-                chunk_data,
-                validate_content=self.validate_content,
-                lease_id=self.lease_id,
-                maxsize_condition=self.maxsize_condition,
-                appendpos_condition=self.current_length + chunk_offset,
-                timeout=self.timeout,
-            )
-
-        self.set_response_properties(resp)
-
-
-class _SubStream(IOBase):
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derived from io.IOBase and thus do not implement seekable().
-        # Python 3.x: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # only the main thread runs this, so there's no need to grab the lock
-            wrapped_stream.seek(0, SEEK_CUR)
-        except Exception:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \
-            else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, n):
-        if self.closed:
-            raise ValueError("Stream is closed.")
-
-        # adjust if out of bounds
-        if n + self._position >= self._length:
-            n = self._length - self._position
-
-        # return fast
-        if n == 0 or self._buffer.closed:
-            return b''
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(n)
-        bytes_read = len(read_buffer)
-        bytes_remaining = n - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_connections > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence is SEEK_SET:
-            start_index = 0
-        elif whence is SEEK_CUR:
-            start_index = self._position
-        elif whence is SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self, b):
-        raise UnsupportedOperation
-
-    def writelines(self, lines):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
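
The parallel path in _upload_blob_chunks above pairs a thread pool with a BoundedSemaphore so that only about max_connections chunks are buffered in memory at once. A minimal sketch of that throttling pattern, with get_chunks and process_chunk as illustrative stand-ins for the uploader methods:

    import concurrent.futures
    from threading import BoundedSemaphore

    def upload_throttled(get_chunks, process_chunk, max_connections=4):
        # + 1 keeps the next chunk buffered and ready for the first free worker
        throttler = BoundedSemaphore(max_connections + 1)
        futures = []
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            for chunk in get_chunks():
                throttler.acquire()  # blocks until a buffer slot frees up
                future = executor.submit(process_chunk, chunk)
                future.add_done_callback(lambda _: throttler.release())
                futures.append(future)
        # result() re-raises any exception set by a worker
        return [f.result() for f in futures]
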
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/appendblobservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/appendblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/appendblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/appendblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,781 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _parse_append_block,
-    _parse_base_properties,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import (
-    _AppendBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties
-)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class AppendBlobService(BaseBlobService):
-    '''
-    An append blob is composed of blocks and is optimized for append operations.
-    When you modify an append blob, blocks are added to the end of the blob only,
-    via the append_block operation. Updating or deleting of existing blocks is not
-    supported. Unlike a block blob, an append blob does not expose its block IDs. 
-
-    Each block in an append blob can be a different size, up to a maximum of 4 MB,
-    and an append blob can include up to 50,000 blocks. The maximum size of an
-    append blob is therefore slightly more than 195 GB (4 MB x 50,000 blocks).
-
-    :ivar int MAX_BLOCK_SIZE: 
-        The size of the blocks put by append_blob_from_* methods. Smaller blocks 
-        may be put if there is less data provided. The maximum block size the service 
-        supports is 4MB.
-    '''
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither are 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type `~azure.storage.common.TokenCredential`
-        '''
-        self.blob_type = _BlobTypes.AppendBlob
-        super(AppendBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def create_blob(self, container_name, blob_name, content_settings=None,
-                    metadata=None, lease_id=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a blob or overwrites an existing blob. Use if_none_match=* to
-        prevent overwriting an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to
-            perform the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def append_block(self, container_name, blob_name, block,
-                     validate_content=False, maxsize_condition=None,
-                     appendpos_condition=None,
-                     lease_id=None, if_modified_since=None,
-                     if_unmodified_since=None, if_match=None,
-                     if_none_match=None, timeout=None):
-        '''
-        Commits a new block of data to the end of an existing append blob.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes block:
-            Content of the block in bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            ETag, last modified, append offset, and committed block count 
-            properties for the updated Append Blob
-        :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'appendblock',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
-            'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        request.body = _get_data_bytes_only('block', block)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_append_block)
-
-    def append_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start=None,
-                              source_range_end=None, source_content_md5=None, source_if_modified_since=None,
-                              source_if_unmodified_since=None, source_if_match=None,
-                              source_if_none_match=None, maxsize_condition=None,
-                              appendpos_condition=None, lease_id=None, if_modified_since=None,
-                              if_unmodified_since=None, if_match=None,
-                              if_none_match=None, timeout=None):
-        """
-        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob.
-        :param str copy_source_url:
-            The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
-            shared access signature attached.
-        :param int source_range_start:
-            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
-        :param int source_range_end:
-            This indicates the end of the range of bytes (inclusive) that has to be taken from the copy source.
-        :param str source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :param str source_if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the source resource's ETag matches the value specified.
-        :param str source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the source resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the source resource does not exist, and fail the
-            operation if it does exist.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source_url', copy_source_url)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'appendblock',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-copy-source': copy_source_url,
-            'x-ms-source-content-md5': source_content_md5,
-            'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since),
-            'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since),
-            'x-ms-source-if-Match': _to_str(source_if_match),
-            'x-ms-source-if-None-Match': _to_str(source_if_none_match),
-            'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
-            'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-
-        _validate_and_format_range_headers(request, source_range_start, source_range_end,
-                                           start_range_required=False,
-                                           end_range_required=False,
-                                           range_header_name="x-ms-source-range")
-
-        return self._perform_request(request, _parse_append_block)
-
-    # ----Convenience APIs----------------------------------------------
-
-    def append_blob_from_path(
-            self, container_name, blob_name, file_path, validate_content=False,
-            maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None):
-        '''
-        Appends to the content of an existing blob from a file path, with automatic
-        chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.append_blob_from_stream(
-                container_name,
-                blob_name,
-                stream,
-                count=count,
-                validate_content=validate_content,
-                maxsize_condition=maxsize_condition,
-                progress_callback=progress_callback,
-                lease_id=lease_id,
-                timeout=timeout,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match)
-
-    def append_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None):
-        '''
-        Appends to the content of an existing blob from an array of bytes, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.append_blob_from_stream(
-            container_name,
-            blob_name,
-            stream,
-            count=count,
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match)
-
-    def append_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None):
-        '''
-        Appends to the content of an existing blob from str/unicode, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bit flips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.append_blob_from_bytes(
-            container_name,
-            blob_name,
-            text,
-            index=0,
-            count=len(text),
-            validate_content=validate_content,
-            maxsize_condition=maxsize_condition,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            timeout=timeout,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match)
-
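A minimal sketch of the text variant, reusing the hypothetical `service` from the earlier sketch; str input is encoded with the given codec, while bytes input passes through unchanged before deferring to append_blob_from_bytes:

    # str input is encoded first; bytes input skips the encoding step
    service.append_blob_from_text('logs', 'audit.log', u'na\u00efve entry\n', encoding='utf-8')
    service.append_blob_from_text('logs', 'audit.log', b'raw bytes pass through\n')
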
-    def append_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            validate_content=False, maxsize_condition=None, progress_callback=None,
-            lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None):
-        '''
-        Appends to the content of an existing blob from a file/stream, with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bit flips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param int maxsize_condition:
-            Conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetime will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :return: ETag and last modified properties for the Append Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        # _upload_blob_chunks returns the block ids for block blobs so resource_properties
-        # is passed as a parameter to get the last_modified and etag for page and append blobs.
-        # this info is not needed for block_blobs since _put_block_list is called after which gets this info
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_BLOCK_SIZE,
-            stream=stream,
-            max_connections=1,  # upload not easily parallelizable
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_AppendBlobChunkUploader,
-            maxsize_condition=maxsize_condition,
-            timeout=timeout,
-            resource_properties=resource_properties,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match
-        )
-
-        return resource_properties
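A minimal sketch of the stream variant, again with the hypothetical `service` above; chunks are uploaded sequentially (max_connections is pinned to 1) and progress is reported per chunk:

    import io

    def show_progress(current, total):
        # total is None when count was not supplied and the stream length is unknown
        print('appended {0} of {1} bytes'.format(current, total))

    with io.open('events.bin', 'rb') as fh:
        service.append_blob_from_stream(
            'logs', 'audit.log', fh,
            progress_callback=show_progress,
            maxsize_condition=4 * 1024 * 1024)  # fail with 412 once the blob would exceed 4 MiB
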
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/baseblobservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/baseblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/baseblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/baseblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,3397 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from abc import ABCMeta
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-    _StorageNoAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _parse_metadata,
-    _parse_properties,
-    _convert_xml_to_service_stats,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_decryption_required,
-    _validate_access_policies,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-    _validate_user_delegation_key,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    BlobSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_containers,
-    _parse_blob,
-    _convert_xml_to_blob_list,
-    _convert_xml_to_blob_name_list,
-    _parse_container,
-    _parse_snapshot_blob,
-    _parse_lease,
-    _convert_xml_to_signed_identifiers_and_access,
-    _parse_base_properties,
-    _parse_account_information,
-    _convert_xml_to_user_delegation_key,
-)
-from ._download_chunking import _download_blob_chunks
-from ._error import (
-    _ERROR_INVALID_LEASE_DURATION,
-    _ERROR_INVALID_LEASE_BREAK_PERIOD,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-    _convert_delegation_key_info_to_xml,
-)
-from .models import (
-    BlobProperties,
-    _LeaseActions,
-    ContainerPermissions,
-    BlobPermissions,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-_CONTAINER_ALREADY_EXISTS_ERROR_CODE = 'ContainerAlreadyExists'
-_BLOB_NOT_FOUND_ERROR_CODE = 'BlobNotFound'
-_CONTAINER_NOT_FOUND_ERROR_CODE = 'ContainerNotFound'
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class BaseBlobService(StorageClient):
-    '''
-    This is the main class managing Blob resources.
-
-    The Blob service stores text and binary data as blobs in the cloud.
-    The Blob service offers the following three resources: the storage account,
-    containers, and blobs. Within your storage account, containers provide a
-    way to organize sets of blobs. For more information please see:
-    https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_blob_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        blob is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_blob_to_* methods if
-        max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the blob is smaller than
-        this. If this is set to larger than 4MB, validate_content will throw an
-        error if enabled. However, if validate_content is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all blobs successfully uploaded to the service and all those downloaded and
-        successfully read from the service are/were encrypted while on the server. If this flag is set, all required
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    __metaclass__ = ABCMeta
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, the account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'blob',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            token_credential=token_credential,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            custom_domain=custom_domain,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(BaseBlobService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-                self.is_emulated
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        elif self.token_credential:
-            self.authentication = self.token_credential
-        else:
-            self.authentication = _StorageNoAuthentication()
-
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None):
-        '''
-        Creates the url to access a blob.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :param str snapshot:
-            A string value that uniquely identifies the snapshot. The value of
-            this query parameter indicates the snapshot version.
-        :return: blob access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}/{}'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-            blob_name,
-        )
-
-        if snapshot and sas_token:
-            url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token)
-        elif snapshot:
-            url = '{}?snapshot={}'.format(url, snapshot)
-        elif sas_token:
-            url = '{}?{}'.format(url, sas_token)
-
-        return url
-
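A minimal sketch of the URL helper, reusing the hypothetical `service` above (AppendBlobService derives from BaseBlobService); the snapshot value is illustrative:

    url = service.make_blob_url('logs', 'audit.log',
                                snapshot='2019-01-01T00:00:00.0000000Z')
    # e.g. https://myaccount.blob.core.windows.net/logs/audit.log?snapshot=2019-01-01T00:00:00.0000000Z
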
-    def make_container_url(self, container_name, protocol=None, sas_token=None):
-        '''
-        Creates the url to access a container.
-
-        :param str container_name:
-            Name of container.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when BaseBlobService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: container access URL.
-        :rtype: str
-        '''
-
-        url = '{}://{}/{}?restype=container'.format(
-            protocol or self.protocol,
-            self.primary_endpoint,
-            container_name,
-        )
-
-        if sas_token:
-            url = '{}&{}'.format(url, sas_token)
-
-        return url
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the blob service.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = BlobSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.BLOB, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
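A minimal sketch of minting an account-level SAS, assuming the common models are importable from this package's (now-removed) common tree:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2018_11_09.common.models import (
        AccountPermissions, ResourceTypes,
    )

    token = service.generate_account_shared_access_signature(
        ResourceTypes(container=True, object=True),
        AccountPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1))
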
-    def generate_container_shared_access_signature(self, container_name,
-                                                   permission=None, expiry=None,
-                                                   start=None, id=None, ip=None, protocol=None,
-                                                   cache_control=None, content_disposition=None,
-                                                   content_encoding=None, content_language=None,
-                                                   content_type=None, user_delegation_key=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use 
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
-            Instead of an account key, the user could pass in a user delegation key.
-            A user delegation key can be obtained from the service by authenticating with an AAD identity;
-            this can be accomplished by calling get_user_delegation_key.
-            When present, the SAS is signed with the user delegation key instead.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('self.account_name', self.account_name)
-
-        if user_delegation_key is not None:
-            _validate_user_delegation_key(user_delegation_key)
-            sas = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key)
-        else:
-            _validate_not_none('self.account_key', self.account_key)
-            sas = BlobSharedAccessSignature(self.account_name, account_key=self.account_key)
-
-        return sas.generate_container(
-            container_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def generate_blob_shared_access_signature(
-            self, container_name, blob_name, snapshot=None, permission=None,
-            expiry=None, start=None, id=None, ip=None, protocol=None,
-            cache_control=None, content_disposition=None,
-            content_encoding=None, content_language=None,
-            content_type=None, user_delegation_key=None):
-        '''
-        Generates a shared access signature for the blob or one of its snapshots.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to which permission is granted.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_container_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
-            Instead of an account key, the user could pass in a user delegation key.
-            A user delegation key can be obtained from the service by authenticating with an AAD identity;
-            this can be accomplished by calling get_user_delegation_key.
-            When present, the SAS is signed with the user delegation key instead.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('self.account_name', self.account_name)
-
-        if user_delegation_key is not None:
-            _validate_user_delegation_key(user_delegation_key)
-            sas = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key)
-        else:
-            _validate_not_none('self.account_key', self.account_key)
-            sas = BlobSharedAccessSignature(self.account_name, account_key=self.account_key)
-
-        return sas.generate_blob(
-            container_name=container_name,
-            blob_name=blob_name,
-            snapshot=snapshot,
-            permission=permission,
-            expiry=expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
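A minimal sketch combining a blob-level SAS with make_blob_url to produce a time-limited read link; the import path is an assumption:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2018_11_09.blob.models import BlobPermissions

    sas = service.generate_blob_shared_access_signature(
        'logs', 'audit.log',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    read_url = service.make_blob_url('logs', 'audit.log', sas_token=sas)
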
-    def get_user_delegation_key(self, key_start_time, key_expiry_time, timeout=None):
-        """
-        Obtain a user delegation key for the purpose of signing SAS tokens.
-        A token credential must be present on the service object for this request to succeed.
-
-        :param datetime key_start_time:
-            A DateTime value. Indicates when the key becomes valid.
-        :param datetime key_expiry_time:
-            A DateTime value. Indicates when the key stops being valid.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The user delegation key.
-        :rtype: ~azure.storage.blob.models.UserDelegationKey
-        """
-        _validate_not_none('key_start_time', key_start_time)
-        _validate_not_none('key_end_time', key_expiry_time)
-
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.query = {
-            'restype': 'service',
-            'comp': 'userdelegationkey',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(_convert_delegation_key_info_to_xml(key_start_time, key_expiry_time))
-        return self._perform_request(request, _convert_xml_to_user_delegation_key)
-
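A minimal sketch of the user-delegation flow, assuming a service constructed with a token_credential and the same hypothetical names as above:

    from datetime import datetime, timedelta

    key = service.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=2))
    # sign the SAS with the delegation key instead of the account key
    sas = service.generate_blob_shared_access_signature(
        'logs', 'audit.log',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
        user_delegation_key=key)
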
-    def list_containers(self, prefix=None, num_results=None, include_metadata=False,
-                        marker=None, timeout=None):
-        '''
-        Returns a generator to list the containers under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        containers, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_containers(**kwargs)
-
-        return ListGenerator(resp, self._list_containers, (), kwargs)
-
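A minimal sketch of consuming the generator, including resuming from next_marker after a capped listing; names are illustrative:

    # lazily follows continuation tokens until exhausted
    for container in service.list_containers(prefix='logs'):
        print(container.name)

    # capped listing: next_marker is populated once the generator finishes
    first_page = service.list_containers(num_results=5)
    names = [c.name for c in first_page]
    if first_page.next_marker:
        remainder = service.list_containers(marker=first_page.next_marker)
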
-    def _list_containers(self, prefix=None, marker=None, max_results=None,
-                         include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the containers under the specified account.
-
-        :param str prefix:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of containers to return. A single list
-            request may return up to 1000 containers and potentially a continuation
-            token which should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the container's
-            metadata be returned as part of the response body. Set this
-            parameter to the string 'metadata' to get the container's metadata.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_containers, operation_context=_context)
-
-    def create_container(self, container_name, metadata=None,
-                         public_access=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails if
-        fail_on_exist is True.
-
-        :param str container_name:
-            Name of container to create.
-        :param metadata:
-            A dict with name_value pairs to associate with the
-            container as metadata. Example:{'Category':'test'}
-        :type metadata: dict(str, str)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the container exists.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is created, False if container already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request, expected_errors=[_CONTAINER_ALREADY_EXISTS_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
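A minimal sketch of idempotent container creation; PublicAccess is assumed to be importable from the removed blob models module:

    from azure.multiapi.storage.v2018_11_09.blob.models import PublicAccess

    created = service.create_container(
        'logs', metadata={'Category': 'test'},
        public_access=PublicAccess.Blob)  # anonymous read access to blobs only
    if not created:
        print('container already existed')
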
-    def get_container_properties(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: properties for the specified container within a container object.
-        :rtype: :class:`~azure.storage.blob.models.Container`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_container, [container_name])
-
-    def get_container_metadata(self, container_name, lease_id=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            If specified, get_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary representing the container metadata name, value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_container_metadata(self, container_name, metadata=None,
-                               lease_id=None, if_modified_since=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param str container_name:
-            Name of existing container.
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as 
-            metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
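A minimal sketch of the metadata round trip with the hypothetical `service` above; note that a call without a metadata dict clears everything previously set:

    service.set_container_metadata('logs', metadata={'category': 'test'})
    assert service.get_container_metadata('logs')['category'] == 'test'

    service.set_container_metadata('logs')  # replaces all metadata with nothing
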
-    def get_container_acl(self, container_name, lease_id=None, timeout=None):
-        '''
-        Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param lease_id:
-            If specified, get_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the container,
-            mapping str to :class:`azure.storage.common.models.AccessPolicy`, plus a
-            public_access property if public access is turned on.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access)
-
-    def set_container_acl(self, container_name, signed_identifiers=None,
-                          public_access=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Sets the permissions for the specified container or stored access 
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param str container_name:
-            Name of existing container.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param ~azure.storage.blob.models.PublicAccess public_access:
-            Possible values include: container, blob.
-        :param str lease_id:
-            If specified, set_container_acl only succeeds if the
-            container's lease is active and matches this ID.
-        :param datetime if_modified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified date/time.
-        :param datetime if_unmodified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Container
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-public-access': _to_str(public_access),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        return self._perform_request(request, _parse_base_properties)
-
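A minimal sketch of attaching a stored access policy, assuming the model import paths used in the earlier sketches:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2018_11_09.common.models import AccessPolicy
    from azure.multiapi.storage.v2018_11_09.blob.models import ContainerPermissions

    policy = AccessPolicy(
        permission=ContainerPermissions.READ + ContainerPermissions.LIST,
        expiry=datetime.utcnow() + timedelta(days=7))
    service.set_container_acl('logs', signed_identifiers={'read-only': policy})
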
-    def delete_container(self, container_name, fail_not_exist=False,
-                         lease_id=None, if_modified_since=None,
-                         if_unmodified_since=None, timeout=None):
-        '''
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :param str container_name:
-            Name of container to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the container doesn't
-            exist.
-        :param str lease_id:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if container is deleted, False if the container doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request, expected_errors=[_CONTAINER_NOT_FOUND_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
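A minimal sketch of the two deletion modes, with the same hypothetical `service`:

    # returns False instead of raising when the container is already gone
    deleted = service.delete_container('logs', fail_not_exist=False)

    # with fail_not_exist=True a missing container raises AzureHttpError instead:
    # service.delete_container('logs', fail_not_exist=True)
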
-    def _lease_container_impl(
-            self, container_name, lease_action, lease_id, lease_duration,
-            lease_break_period, proposed_lease_id, if_modified_since,
-            if_unmodified_since, timeout):
-        '''
-        Establishes and manages a lease on a container.
-        The Lease Container operation can be called in one of five modes
-            Acquire, to request a new lease
-            Renew, to renew an existing lease
-            Change, to change the ID of an existing lease
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the container
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_action:
-            Possible _LeaseActions values: acquire|renew|release|break|change
-        :param str lease_id:
-            Required if the container has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. For backwards compatibility, the default is
-            60, and the value is only used on an acquire operation.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration, in
-            seconds, that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for Acquire, required for Change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_container_lease(
-            self, container_name, lease_duration=-1, proposed_lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Requests a new lease. If the container does not have an active lease,
-        the Blob service creates a lease on the container and returns a new
-        lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Acquire,
-                                           None,  # lease_id
-                                           lease_duration,
-                                           None,  # lease_break_period
-                                           proposed_lease_id,
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
-
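A short sketch of acquiring a fixed-duration lease with the service object from the earlier sketch; the 15-60 second bound mirrors the validation above:

    # Acquire a 15-second lease; lease_duration=-1 would request an infinite lease.
    lease_id = service.acquire_container_lease('mycontainer', lease_duration=15)

    # While leased, destructive operations require the lease ID:
    service.delete_container('mycontainer', lease_id=lease_id)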
-    def renew_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified
-        matches that associated with the container. Note that
-        the lease may be renewed even if it has expired as long as the container
-        has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Renew,
-                                           lease_id,
-                                           None,  # lease_duration
-                                           None,  # lease_break_period
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['id']
-
-    def release_container_lease(
-            self, container_name, lease_id, if_modified_since=None,
-            if_unmodified_since=None, timeout=None):
-        '''
-        Release the lease. The lease may be released if the lease_id specified matches
-        that associated with the container. Releasing the lease allows another client
-        to immediately acquire the lease for the container as soon as the release is complete. 
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Release,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   None,  # proposed_lease_id
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
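The renew/release half of the lifecycle, as a sketch with the same assumed service object:

    # Renewing resets the lease duration clock, even shortly after expiry,
    # provided no one else has leased the container in the meantime.
    lease_id = service.renew_container_lease('mycontainer', lease_id)

    # Releasing lets another client acquire a lease immediately.
    service.release_container_lease('mycontainer', lease_id)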
-    def break_container_lease(
-            self, container_name, lease_break_period=None,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Break the lease, if the container has an active lease. Once a lease is
-        broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired. 
-
-        :param str container_name:
-            Name of existing container.
-        :param int lease_break_period:
-            This is the proposed duration, in seconds, that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_container_impl(container_name,
-                                           _LeaseActions.Break,
-                                           None,  # lease_id
-                                           None,  # lease_duration
-                                           lease_break_period,
-                                           None,  # proposed_lease_id
-                                           if_modified_since,
-                                           if_unmodified_since,
-                                           timeout)
-        return lease['time']
-
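A sketch of breaking a lease; note that no lease ID is needed, matching the docstring above:

    # Ask the service to end the current lease within roughly 10 seconds.
    # The return value is the approximate time, in seconds, until a new
    # lease can be acquired.
    seconds_left = service.break_container_lease('mycontainer',
                                                 lease_break_period=10)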
-    def change_container_lease(
-            self, container_name, lease_id, proposed_lease_id,
-            if_modified_since=None, if_unmodified_since=None, timeout=None):
-        '''
-        Change the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_container_impl(container_name,
-                                   _LeaseActions.Change,
-                                   lease_id,
-                                   None,  # lease_duration
-                                   None,  # lease_break_period
-                                   proposed_lease_id,
-                                   if_modified_since,
-                                   if_unmodified_since,
-                                   timeout)
-
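A sketch of rotating the lease ID in place; the proposed ID must be a GUID string or the service returns 400:

    import uuid

    new_lease_id = str(uuid.uuid4())
    service.change_container_lease('mycontainer', lease_id,
                                   proposed_lease_id=new_lease_id)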
-    def list_blobs(self, container_name, prefix=None, num_results=None, include=None,
-                   delimiter=None, marker=None, timeout=None):
-        '''
-        Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all blobs have been returned or num_results is reached.
-
-        If num_results is specified and the container has more than that number of
-        blobs, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param ~azure.storage.blob.models.Include include:
-            Specifies one or more additional datasets to include in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
-            result list that acts as a placeholder for all blobs whose names begin
-            with the same substring up to the appearance of the delimiter character.
-            The delimiter may be a single character or a string.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (container_name,)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'delimiter': delimiter, 'timeout': timeout,
-                  '_context': operation_context,
-                  '_converter': _convert_xml_to_blob_list}
-        resp = self._list_blobs(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_blobs, args, kwargs)
-
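A sketch of the lazy generator behavior described above, including resuming from next_marker (the models import path is an assumption, as before):

    from azure.multiapi.storage.v2018_11_09.blob.models import Include  # hypothetical path

    # The generator lazily follows continuation tokens up to num_results.
    page = service.list_blobs('mycontainer', prefix='logs/', num_results=100,
                              include=Include(metadata=True))
    for blob in page:
        print(blob.name, blob.properties.last_modified)

    # If more blobs remain, next_marker is populated and can seed a new generator.
    if page.next_marker:
        rest = service.list_blobs('mycontainer', prefix='logs/',
                                  marker=page.next_marker)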
-    def list_blob_names(self, container_name, prefix=None, num_results=None,
-                        include=None, delimiter=None, marker=None,
-                        timeout=None):
-        '''
-        Returns a generator to list the blob names under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all blobs have been returned or num_results is reached.
-
-        If num_results is specified and the container has more than that number of
-        blobs, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`BlobPrefix` elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param ~azure.storage.blob.models.Include include:
-            Specifies one or more additional datasets to include in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
-            result list that acts as a placeholder for all blobs whose names begin
-            with the same substring up to the appearance of the delimiter character.
-            The delimiter may be a single character or a string.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (container_name,)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'delimiter': delimiter, 'timeout': timeout,
-                  '_context': operation_context,
-                  '_converter': _convert_xml_to_blob_name_list}
-        resp = self._list_blobs(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_blobs, args, kwargs)
-
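A sketch of name-only, one-level listing using a delimiter; blobs sharing a prefix up to the delimiter collapse into a single placeholder entry:

    # With delimiter='/', names such as 'logs/2024/a.txt' and 'logs/2024/b.txt'
    # are summarized by a single 'logs/'-style prefix entry.
    for name in service.list_blob_names('mycontainer', delimiter='/'):
        print(name)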
-    def _list_blobs(self, container_name, prefix=None, marker=None,
-                    max_results=None, include=None, delimiter=None, timeout=None,
-                    _context=None, _converter=None):
-        '''
-        Returns the list of blobs under the specified container.
-
-        :param str container_name:
-            Name of existing container.
-        :param str prefix:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of blobs to return,
-            including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str include:
-            Specifies one or more datasets to include in the
-            response. To specify more than one of these options on the URI,
-            you must separate each option with a comma. Valid values are:
-                snapshots:
-                    Specifies that snapshots should be included in the
-                    enumeration. Snapshots are listed from oldest to newest in
-                    the response.
-                metadata:
-                    Specifies that blob metadata be returned in the response.
-                uncommittedblobs:
-                    Specifies that blobs for which blocks have been uploaded,
-                    but which have not been committed using Put Block List
-                    (REST API), be included in the response.
-                copy:
-                    Version 2012-02-12 and newer. Specifies that metadata
-                    related to any current or previous Copy Blob operation
-                    should be included in the response.
-                deleted:
-                    Version 2017-07-29 and newer. Specifies that soft deleted blobs
-                    which are retained by the service should be included
-                    in the response.
-        :param str delimiter:
-            When the request includes this parameter, the operation
-            returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a
-            placeholder for all blobs whose names begin with the same
-            substring up to the appearance of the delimiter character. The
-            delimiter may be a single character or a string.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name)
-        request.query = {
-            'restype': 'container',
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'delimiter': _to_str(delimiter),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _converter, operation_context=_context)
-
-    def get_blob_account_information(self, container_name=None, blob_name=None, timeout=None):
-        """
-        Gets information related to the storage account.
-        The information can also be retrieved if the user has a SAS to a container or blob.
-
-        :param str container_name:
-            Name of existing container.
-            Optional, unless using a SAS token to a specific container or blob, in which case it's required.
-        :param str blob_name:
-            Name of existing blob.
-            Optional, unless using a SAS token to a specific blob, in which case it's required.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The :class:`~azure.storage.blob.models.AccountInformation`.
-        """
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'restype': 'account',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _parse_account_information)
-
-    def get_blob_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Blob service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage keeps your data durable
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create an account via the Azure Management Portal, for
-        example, North Central US. The location to which your data is replicated
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
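A sketch of reading replication state from the stats call; only meaningful when read-access geo-redundant replication is enabled, per the docstring:

    stats = service.get_blob_service_stats()
    geo = stats.geo_replication
    # status is e.g. 'live'; last_sync_time marks the secondary's sync point.
    print(geo.status, geo.last_sync_time)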
-    def set_blob_service_properties(
-            self, logging=None, hour_metrics=None, minute_metrics=None,
-            cors=None, target_version=None, timeout=None, delete_retention_policy=None, static_website=None):
-        '''
-        Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param logging:
-            Groups the Azure Analytics Logging settings.
-        :type logging:
-            :class:`~azure.storage.common.models.Logging`
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for blobs.
-        :type hour_metrics:
-            :class:`~azure.storage.common.models.Metrics`
-        :param minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for blobs.
-        :type minute_metrics:
-            :class:`~azure.storage.common.models.Metrics`
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param str target_version:
-            Indicates the default version to use for requests if an incoming 
-            request's version is not specified. 
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param delete_retention_policy:
-            The delete retention policy specifies whether to retain deleted blobs.
-            It also specifies the number of days and versions of blob to keep.
-        :type delete_retention_policy:
-            :class:`~azure.storage.common.models.DeleteRetentionPolicy`
-        :param static_website:
-            Specifies whether the static website feature is enabled,
-            and if yes, indicates the index document and 404 error document to use.
-        :type static_website:
-            :class:`~azure.storage.common.models.StaticWebsite`
-        '''
-        if all(parameter is None for parameter in [logging, hour_metrics, minute_metrics, cors, target_version,
-                                                   delete_retention_policy, static_website]):
-            raise ValueError("set_blob_service_properties should be called with at least one parameter")
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
-                                               cors, target_version, delete_retention_policy, static_website))
-
-        self._perform_request(request)
-
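A sketch of a partial update; parameters left as None keep their existing service-side settings, per the docstring (the common.models import path is an assumption):

    from azure.multiapi.storage.v2018_11_09.common.models import (  # hypothetical path
        CorsRule, DeleteRetentionPolicy)

    service.set_blob_service_properties(
        cors=[CorsRule(allowed_origins=['https://contoso.com'],
                       allowed_methods=['GET'])],
        delete_retention_policy=DeleteRetentionPolicy(enabled=True, days=7))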
-    def get_blob_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob :class:`~azure.storage.common.models.ServiceProperties` with an attached
-            target_version property.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def get_blob_properties(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-        Returns :class:`~azure.storage.blob.models.Blob`
-        with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: a blob object including properties and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_blob, [blob_name, snapshot])
-
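A sketch of fetching properties and capturing the ETag for later optimistic concurrency via if_match:

    blob = service.get_blob_properties('mycontainer', 'myblob')
    print(blob.properties.content_length, blob.metadata)

    # Usable later with if_match to fail fast if someone else modified the blob.
    etag = blob.properties.etag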
-    def set_blob_properties(
-            self, container_name, blob_name, content_settings=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Sets system properties on the blob. If one property is set in the
-        content_settings, all properties will be overridden.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        return self._perform_request(request, _parse_base_properties)
-
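Because setting one property overrides them all (see the note above), a sketch that re-supplies the full set of content settings (ContentSettings import path assumed as before):

    from azure.multiapi.storage.v2018_11_09.blob.models import ContentSettings  # hypothetical path

    service.set_blob_properties(
        'mycontainer', 'myblob',
        content_settings=ContentSettings(content_type='application/json',
                                         cache_control='max-age=3600'))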
-    def exists(self, container_name, blob_name=None, snapshot=None, timeout=None):
-        '''
-        Returns a boolean indicating whether the container exists (if blob_name 
-        is None), or otherwise a boolean indicating whether the blob exists.
-
-        :param str container_name:
-            Name of a container.
-        :param str blob_name:
-            Name of a blob. If None, the container will be checked for existence.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the snapshot.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('container_name', container_name)
-        try:
-            # issue a GET (container) or HEAD (blob) request to see if the resource exists
-            request = HTTPRequest()
-            request.method = 'GET' if blob_name is None else 'HEAD'
-            request.host_locations = self._get_host_locations(secondary=True)
-            request.path = _get_path(container_name, blob_name)
-            request.query = {
-                'snapshot': _to_str(snapshot),
-                'timeout': _int_to_str(timeout),
-                'restype': 'container' if blob_name is None else None,
-            }
-
-            expected_errors = [_CONTAINER_NOT_FOUND_ERROR_CODE] if blob_name is None \
-                else [_CONTAINER_NOT_FOUND_ERROR_CODE, _BLOB_NOT_FOUND_ERROR_CODE]
-            self._perform_request(request, expected_errors=expected_errors)
-
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
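A sketch of both existence checks; the container check issues a GET and the blob check a HEAD, matching the method body:

    if service.exists('mycontainer'):
        if not service.exists('mycontainer', 'myblob'):
            print('container exists but blob does not')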
-    def _get_blob(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            _context=None):
-        '''
-        Downloads a blob's content, metadata, and properties. You can also
-        call this API to read a snapshot. You can specify a range if you don't
-        need to download the blob in its entirety. If no range is specified,
-        the full blob will be downloaded.
-
-        See get_blob_to_* for high-level functions that handle the download
-        of large blobs with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A Blob with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_decryption_required(self.require_encryption,
-                                      self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        start_offset, end_offset = 0, 0
-        if self.key_encryption_key is not None or self.key_resolver_function is not None:
-            if start_range is not None:
-                # Align the start of the range along a 16 byte block
-                start_offset = start_range % 16
-                start_range -= start_offset
-
-                # Include an extra 16 bytes for the IV if necessary
-                # Because of the previous offsetting, start_range will always
-                # be a multiple of 16.
-                if start_range > 0:
-                    start_offset += 16
-                    start_range -= 16
-
-            if end_range is not None:
-                # Align the end of the range along a 16 byte block
-                end_offset = 15 - (end_range % 16)
-                end_range += end_offset
-
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_blob,
-                                     [blob_name, snapshot, validate_content, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function,
-                                      start_offset, end_offset],
-                                     operation_context=_context)
-
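The 16-byte alignment above exists because AES-CBC decrypts in 16-byte blocks and needs the preceding ciphertext block as an IV. A worked sketch of the same arithmetic in isolation (the function name is illustrative, not part of the API):

    def align_encrypted_range(start_range, end_range):
        # Mirrors the offset logic in _get_blob: widen [start, end] to 16-byte
        # boundaries, pulling in one extra block for the IV when start > 0.
        start_offset = start_range % 16
        start_range -= start_offset
        if start_range > 0:
            start_offset += 16
            start_range -= 16
        end_offset = 15 - (end_range % 16)
        end_range += end_offset
        return start_range, end_range, start_offset, end_offset

    # Requesting bytes 100-200 actually downloads 80-207; after decryption the
    # caller trims 20 leading bytes (16 IV + 4 alignment) and 7 trailing bytes.
    print(align_encrypted_range(100, 200))  # (80, 207, 20, 7)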
-    def get_blob_to_path(
-            self, container_name, blob_name, file_path, open_mode='wb',
-            snapshot=None, start_range=None, end_range=None,
-            validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Downloads a blob to a file path, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str file_path:
-            Path of file to write out to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an append-only
-            open_mode prevents parallel download, so max_connections must be set
-            to 1 if such an open_mode is used.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the wire
-            when using http instead of https, as https (the default) already
-            validates. Note that the service will only return transactional MD5s
-            for chunks of 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will be
-            thrown. As computing the MD5 takes processing time and more requests
-            must be made due to the reduced chunk size, there may be some
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel, using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            blob = self.get_blob_to_stream(
-                container_name,
-                blob_name,
-                stream,
-                snapshot,
-                start_range,
-                end_range,
-                validate_content,
-                progress_callback,
-                max_connections,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout)
-
-        return blob
-
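A sketch of a ranged, chunked download with progress reporting, per the parameters above:

    def report(current, total):
        # current/total are byte counts; total may be None if the size is unknown.
        print('downloaded %s of %s bytes' % (current, total))

    # First 1 MiB only (ranges are inclusive), fetched on 4 parallel threads.
    blob = service.get_blob_to_path(
        'mycontainer', 'myblob', '/tmp/myblob.bin',
        start_range=0, end_range=1024 * 1024 - 1,
        progress_callback=report, max_connections=4)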
-    def get_blob_to_stream(
-            self, container_name, blob_name, stream, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties and metadata.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param io.IOBase stream:
-            Opened stream to write to.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the
-            wire when using http instead of https, as https (the default) already
-            validates. Note that the service only returns transactional MD5s for
-            chunks of 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will be
-            thrown. Because computing the MD5 takes processing time and more
-            requests must be made due to the reduced chunk size, there may be
-            some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-
-        if end_range is not None:
-            _validate_not_none("start_range", start_range)
-
-        # the stream must be seekable if parallel download is required
-        if max_connections > 1:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-            try:
-                stream.seek(stream.tell())
-            except (NotImplementedError, AttributeError):
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
-        first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-        initial_request_start = start_range if start_range is not None else 0
-
-        if end_range is not None and end_range - start_range < first_get_size:
-            initial_request_end = end_range
-        else:
-            initial_request_end = initial_request_start + first_get_size - 1
-
-        # Send a context object to make sure we always retry to the initial location
-        operation_context = _OperationContext(location_lock=True)
-        try:
-            blob = self._get_blob(container_name,
-                                  blob_name,
-                                  snapshot,
-                                  start_range=initial_request_start,
-                                  end_range=initial_request_end,
-                                  validate_content=validate_content,
-                                  lease_id=lease_id,
-                                  if_modified_since=if_modified_since,
-                                  if_unmodified_since=if_unmodified_since,
-                                  if_match=if_match,
-                                  if_none_match=if_none_match,
-                                  timeout=timeout,
-                                  _context=operation_context)
-
-            # Parse the total blob size and adjust the download size if ranges
-            # were specified
-            blob_size = _parse_length_from_content_range(blob.properties.content_range)
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the blob
-                download_size = min(blob_size, end_range - start_range + 1)
-            elif start_range is not None:
-                download_size = blob_size - start_range
-            else:
-                download_size = blob_size
-        except AzureHttpError as ex:
-            if start_range is None and ex.status_code == 416:
-                # Get range will fail on an empty blob. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                blob = self._get_blob(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      validate_content=validate_content,
-                                      lease_id=lease_id,
-                                      if_modified_since=if_modified_since,
-                                      if_unmodified_since=if_unmodified_since,
-                                      if_match=if_match,
-                                      if_none_match=if_none_match,
-                                      timeout=timeout,
-                                      _context=operation_context)
-
-                # Set the download size to empty
-                download_size = 0
-            else:
-                raise  # re-raise with the original traceback
-
-        # Mark the first progress chunk. If the blob is small or this is a single
-        # shot download, this is the only call
-        if progress_callback:
-            progress_callback(blob.properties.content_length, download_size)
-
-        # Write the content to the user stream
-        # Clear blob content since output has been written to user stream
-        if blob.content is not None:
-            stream.write(blob.content)
-            blob.content = None
-
-        # If the blob is small, the download is complete at this point.
-        # If blob size is large, download the rest of the blob in chunks.
-        if blob.properties.content_length != download_size:
-            # Lock on the etag. This can be overridden by the user by specifying '*'
-            if_match = if_match if if_match is not None else blob.properties.etag
-
-            end_blob = blob_size
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the blob
-                end_blob = min(blob_size, end_range + 1)
-
-            _download_blob_chunks(
-                self,
-                container_name,
-                blob_name,
-                snapshot,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_blob,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                lease_id,
-                if_modified_since,
-                if_unmodified_since,
-                if_match,
-                if_none_match,
-                timeout,
-                operation_context
-            )
-
-            # Set the content length to the download size instead of the size of
-            # the last range
-            blob.properties.content_length = download_size
-
-            # Overwrite the content range to reflect the requested range,
-            # falling back to the computed bounds when no explicit range was given
-            blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(
-                initial_request_start, end_blob - 1, blob_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            blob.properties.content_md5 = None
-
-        return blob
-
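Reusing the illustrative ``service`` client from the sketch above, a ranged
download into an in-memory stream could look like this (ranges are
inclusive, per the docstring)::

    import io

    stream = io.BytesIO()
    # Fetch only the first 512 bytes. max_connections=1 issues a single GET,
    # which also preserves content_md5 if it is set on the blob.
    blob = service.get_blob_to_stream(
        'mycontainer', 'myblob', stream,
        start_range=0, end_range=511,
        max_connections=1)
    first_512 = stream.getvalue()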
-    def get_blob_to_bytes(
-            self, container_name, blob_name, snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the
-            wire when using http instead of https, as https (the default) already
-            validates. Note that the service only returns transactional MD5s for
-            chunks of 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will be
-            thrown. Because computing the MD5 takes processing time and more
-            requests must be made due to the reduced chunk size, there may be
-            some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        stream = BytesIO()
-        blob = self.get_blob_to_stream(
-            container_name,
-            blob_name,
-            stream,
-            snapshot,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            lease_id,
-            if_modified_since,
-            if_unmodified_since,
-            if_match,
-            if_none_match,
-            timeout)
-
-        blob.content = stream.getvalue()
-        return blob
-
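For payloads that fit comfortably in memory, the bytes variant avoids
managing a stream. A short sketch with the same assumed client::

    blob = service.get_blob_to_bytes('mycontainer', 'myblob')
    assert isinstance(blob.content, bytes)  # whole body held in memory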
-    def get_blob_to_text(
-            self, container_name, blob_name, encoding='utf-8', snapshot=None,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Downloads a blob as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
-        properties, metadata, and content.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str encoding:
-            Python encoding to use when decoding the blob data.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the blob.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param int end_range:
-            End of byte range to use for downloading a section of the blob.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the blob.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of
-            the blob. This is primarily valuable for detecting bitflips on the
-            wire when using http instead of https, as https (the default) already
-            validates. Note that the service only returns transactional MD5s for
-            chunks of 4MB or less, so the first get request will be of size
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB, an error will be
-            thrown. Because computing the MD5 takes processing time and more
-            requests must be made due to the reduced chunk size, there may be
-            some increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the blob if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be useful if many blobs are 
-            expected to be empty as an extra request is required for empty blobs 
-            if max_connections is greater than 1.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :return: A Blob with properties and metadata. If max_connections is greater 
-            than 1, the content_md5 (if set on the blob) will not be returned. If you 
-            require this value, either use get_blob_properties or set max_connections 
-            to 1.
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('encoding', encoding)
-
-        blob = self.get_blob_to_bytes(container_name,
-                                      blob_name,
-                                      snapshot,
-                                      start_range,
-                                      end_range,
-                                      validate_content,
-                                      progress_callback,
-                                      max_connections,
-                                      lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        blob.content = blob.content.decode(encoding)
-        return blob
-
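The text variant is ``get_blob_to_bytes`` plus a ``decode``, so a wrong
``encoding`` surfaces as a ``UnicodeDecodeError``. A sketch with the assumed
client::

    blob = service.get_blob_to_text('mycontainer', 'notes.txt',
                                    encoding='utf-8')
    print(type(blob.content))  # str, already decoded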
-    def get_blob_metadata(
-            self, container_name, blob_name, snapshot=None, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Returns all user-defined metadata for the specified blob or snapshot.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque value that,
-            when present, specifies the blob snapshot to retrieve.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            A dictionary of the blob's metadata name-value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
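A sketch of reading metadata with the assumed client; the returned dict
contains only user-defined pairs, not system properties::

    metadata = service.get_blob_metadata('mycontainer', 'myblob')
    for name, value in metadata.items():
        print('{0}={1}'.format(name, value))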
-    def set_blob_metadata(self, container_name, blob_name,
-                          metadata=None, lease_id=None,
-                          if_modified_since=None, if_unmodified_since=None,
-                          if_match=None, if_none_match=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified blob as one or more
-        name-value pairs.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_base_properties)
-
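Because each call replaces all existing metadata, an update must resend
every pair it wants to keep. A sketch with the assumed client::

    # Leaves exactly these two pairs on the blob, whatever was there before.
    service.set_blob_metadata(
        'mycontainer', 'myblob',
        metadata={'category': 'images', 'owner': 'alice'})

    # Calling with no metadata clears it entirely.
    service.set_blob_metadata('mycontainer', 'myblob')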
-    def _lease_blob_impl(self, container_name, blob_name,
-                         lease_action, lease_id,
-                         lease_duration, lease_break_period,
-                         proposed_lease_id, if_modified_since,
-                         if_unmodified_since, if_match, if_none_match, timeout=None):
-        '''
-        Establishes and manages a lease on a blob for write and delete operations.
-        The Lease Blob operation can be called in one of five modes:
-            Acquire, to request a new lease.
-            Renew, to renew an existing lease.
-            Change, to change the ID of an existing lease.
-            Release, to free the lease if it is no longer needed so that another
-                client may immediately acquire a lease against the blob.
-            Break, to end the lease but ensure that another client cannot acquire
-                a new lease until the current lease period has expired.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_action:
-            One of the _LeaseActions values: acquire|renew|release|break|change
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param str proposed_lease_id:
-            Optional for acquire, required for change. Proposed lease ID, in a
-            GUID string format. The Blob service returns 400 (Invalid request)
-            if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return:
-            Response headers returned from the service call.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('lease_action', lease_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'lease',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-lease-action': _to_str(lease_action),
-            'x-ms-lease-duration': _to_str(lease_duration),
-            'x-ms-lease-break-period': _to_str(lease_break_period),
-            'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_lease)
-
-    def acquire_blob_lease(self, container_name, blob_name,
-                           lease_duration=-1,
-                           proposed_lease_id=None,
-                           if_modified_since=None,
-                           if_unmodified_since=None,
-                           if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Requests a new lease. If the blob does not have an active lease, the Blob
-        service creates a lease on the blob and returns a new lease ID.
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the newly created lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_duration', lease_duration)
-
-        if lease_duration != -1 and \
-                (lease_duration < 15 or lease_duration > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_DURATION)
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Acquire,
-                                      None,  # lease_id
-                                      lease_duration,
-                                      None,  # lease_break_period
-                                      proposed_lease_id,
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
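A sketch of taking a short lease and using it for a guarded write, again
with the assumed client::

    # Non-infinite durations must be between 15 and 60 seconds;
    # -1 requests an infinite lease.
    lease_id = service.acquire_blob_lease(
        'mycontainer', 'myblob', lease_duration=15)

    # While the lease is active, mutating calls must present the lease ID.
    service.set_blob_metadata(
        'mycontainer', 'myblob',
        metadata={'locked': 'true'}, lease_id=lease_id)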
-    def renew_blob_lease(self, container_name, blob_name,
-                         lease_id, if_modified_since=None,
-                         if_unmodified_since=None, if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Renews the lease. The lease can be renewed if the lease ID specified on
-        the request matches that associated with the blob. Note that the lease may
-        be renewed even if it has expired as long as the blob has not been modified
-        or leased again since the expiration of that lease. When you renew a lease,
-        the lease duration clock resets. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the lease ID of the renewed lease.
-        :rtype: str
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Renew,
-                                      lease_id,
-                                      None,  # lease_duration
-                                      None,  # lease_break_period
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['id']
-
-    def release_blob_lease(self, container_name, blob_name,
-                           lease_id, if_modified_since=None,
-                           if_unmodified_since=None, if_match=None,
-                           if_none_match=None, timeout=None):
-        '''
-        Releases the lease. The lease may be released if the lease ID specified on the
-        request matches that associated with the blob. Releasing the lease allows another
-        client to immediately acquire the lease for the blob as soon as the release is complete. 
-        
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            Lease ID for active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Release,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              None,  # proposed_lease_id
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
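Continuing the lease sketch above: renewing resets the duration clock, and
releasing frees the blob for other clients immediately::

    service.renew_blob_lease('mycontainer', 'myblob', lease_id)
    # ... perform the work the lease was protecting ...
    service.release_blob_lease('mycontainer', 'myblob', lease_id)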
-    def break_blob_lease(self, container_name, blob_name,
-                         lease_break_period=None,
-                         if_modified_since=None,
-                         if_unmodified_since=None,
-                         if_match=None,
-                         if_none_match=None, timeout=None):
-        '''
-        Breaks the lease, if the blob has an active lease. Once a lease is broken,
-        it cannot be renewed. Any authorized request can break the lease; the request
-        is not required to specify a matching lease ID. When a lease is broken,
-        the lease break period is allowed to elapse, during which time no lease operation
-        except break and release can be performed on the blob. When a lease is successfully
-        broken, the response indicates the interval in seconds until a new lease can be acquired. 
-
-        A lease that has been broken can also be released, in which case another client may
-        immediately acquire the lease on the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int lease_break_period:
-            For a break operation, this is the proposed duration of
-            seconds that the lease should continue before it is broken, between
-            0 and 60 seconds. This break period is only used if it is shorter
-            than the time remaining on the lease. If longer, the time remaining
-            on the lease is used. A new lease will not be available before the
-            break period has expired, but the lease may be held for longer than
-            the break period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        '''
-        if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
-            raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
-
-        lease = self._lease_blob_impl(container_name,
-                                      blob_name,
-                                      _LeaseActions.Break,
-                                      None,  # lease_id
-                                      None,  # lease_duration
-                                      lease_break_period,
-                                      None,  # proposed_lease_id
-                                      if_modified_since,
-                                      if_unmodified_since,
-                                      if_match,
-                                      if_none_match,
-                                      timeout)
-        return lease['time']
-
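Breaking requires no lease ID, which makes it the escape hatch when the
lease holder has disappeared. A sketch with the assumed client::

    # Returns the approximate seconds until a new lease can be acquired.
    seconds_left = service.break_blob_lease(
        'mycontainer', 'myblob', lease_break_period=10)
    print('lease can be re-acquired in ~{0}s'.format(seconds_left))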
-    def change_blob_lease(self, container_name, blob_name,
-                          lease_id,
-                          proposed_lease_id,
-                          if_modified_since=None,
-                          if_unmodified_since=None,
-                          if_match=None,
-                          if_none_match=None, timeout=None):
-        '''
-        Changes the lease ID of an active lease. A change must include the current
-        lease ID and a new lease ID.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str lease_id:
-            The blob's current lease ID. Required to change an active lease.
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format. 
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('lease_id', lease_id)
-        _validate_not_none('proposed_lease_id', proposed_lease_id)
-        self._lease_blob_impl(container_name,
-                              blob_name,
-                              _LeaseActions.Change,
-                              lease_id,
-                              None,  # lease_duration
-                              None,  # lease_break_period
-                              proposed_lease_id,
-                              if_modified_since,
-                              if_unmodified_since,
-                              if_match,
-                              if_none_match,
-                              timeout)
-
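Rotating a lease ID, for example when handing a lock from one worker to
another, could look like this sketch; the new GUID is generated locally and
``lease_id`` is assumed to come from an earlier acquire::

    import uuid

    new_id = str(uuid.uuid4())
    service.change_blob_lease(
        'mycontainer', 'myblob',
        lease_id=lease_id, proposed_lease_id=new_id)
    lease_id = new_id  # subsequent lease operations must use the new ID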
-    def snapshot_blob(self, container_name, blob_name,
-                      metadata=None, if_modified_since=None,
-                      if_unmodified_since=None, if_match=None,
-                      if_none_match=None, lease_id=None, timeout=None):
-        '''
-        Creates a read-only snapshot of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param metadata:
-            Specifies user-defined name-value pairs associated with the blob.
-            If no name-value pairs are specified, the operation will copy the
-            base blob metadata to the snapshot. If one or more name-value pairs
-            are specified, the snapshot is created with the specified metadata,
-            and metadata is not copied from the base blob.
-        :type metadata: dict(str, str)
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: :class:`~azure.storage.blob.models.Blob`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_blob, [blob_name])
-
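A minimal usage sketch of snapshot_blob, assuming the v2018_11_09 package re-exports BlockBlobService (a BaseBlobService subclass) at this path, and using placeholder account, container, and blob names:

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    blob = service.snapshot_blob('mycontainer', 'myblob')
    # The returned Blob object carries the opaque DateTime id of the snapshot.
    print(blob.snapshot)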
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation may be a block blob, an append blob, 
-        or a page blob. If the destination blob already exists, it must be of the 
-        same blob type as the source blob. Any existing destination blob will be 
-        overwritten. The destination blob cannot be modified while a copy operation 
-        is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page 
-        blob of the source blob's length, initially containing all zeroes. Then 
-        the source page ranges are enumerated, and non-empty ranges are copied. 
-
-        For a block blob or an append blob, the Blob service creates a committed 
-        blob of zero length before returning from this operation. When copying 
-        from a block blob, all committed blocks and their block IDs are copied. 
-        Uncommitted blocks are not copied. At the end of the copy operation, the 
-        destination blob will have the same committed block count as the source.
-
-        When copying from an append blob, all committed blocks are copied. At the 
-        end of the copy operation, the destination blob will have the same committed 
-        block count as the source.
-
-        For all blob types, you can call get_blob_properties on the destination 
-        blob to check the status of the copy operation. The final blob will be 
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination blob. If one or more name-value 
-            pairs are specified, the destination blob is created with the specified 
-            metadata, and metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.  
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param str source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param str source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param str destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param str destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               None,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False, False)
-
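A sketch of starting and polling the asynchronous copy described above; `service` is the placeholder client from the snapshot_blob sketch, and the `properties.copy` attribute path follows the CopyProperties return type named in the docstring:

    import time

    copy = service.copy_blob(
        'dest-container', 'dest-blob',
        'https://myaccount.blob.core.windows.net/mycontainer/myblob')
    # The service copies on a best-effort basis; poll the destination.
    while copy.status == 'pending':
        time.sleep(1)
        copy = service.get_blob_properties(
            'dest-container', 'dest-blob').properties.copy
    print(copy.id, copy.status)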
-    def _copy_blob(self, container_name, blob_name, copy_source,
-                   metadata=None,
-                   premium_page_blob_tier=None,
-                   source_if_modified_since=None,
-                   source_if_unmodified_since=None,
-                   source_if_match=None, source_if_none_match=None,
-                   destination_if_modified_since=None,
-                   destination_if_unmodified_since=None,
-                   destination_if_match=None,
-                   destination_if_none_match=None,
-                   destination_lease_id=None,
-                   source_lease_id=None, timeout=None,
-                   incremental_copy=False,
-                   requires_sync=None):
-        '''
-        See copy_blob for more details. This helper method
-        allows for standard copies as well as incremental copies which are only supported for page blobs and sync
-        copies which are only supported for block blobs.
-        :param bool incremental_copy:
-            Performs an incremental copy operation on a page blob instead of a standard copy operation.
-        :param bool requires_sync:
-            Enforces that the service will not return a response until the copy is complete.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source', copy_source)
-
-        if copy_source.startswith('/'):
-            # Backwards compatibility for earlier versions of the SDK where
-            # the copy source can be in the following formats:
-            # - Blob in named container:
-            #     /accountName/containerName/blobName
-            # - Snapshot in named container:
-            #     /accountName/containerName/blobName?snapshot=<DateTime>
-            # - Blob in root container:
-            #     /accountName/blobName
-            # - Snapshot in root container:
-            #     /accountName/blobName?snapshot=<DateTime>
-            account, _, source = \
-                copy_source.partition('/')[2].partition('/')
-            copy_source = self.protocol + '://' + \
-                          self.primary_endpoint + '/' + source
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-
-        if incremental_copy:
-            request.query = {
-                'comp': 'incrementalcopy',
-                'timeout': _int_to_str(timeout),
-            }
-        else:
-            request.query = {'timeout': _int_to_str(timeout)}
-
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-            'x-ms-source-if-modified-since': _to_str(source_if_modified_since),
-            'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since),
-            'x-ms-source-if-match': _to_str(source_if_match),
-            'x-ms-source-if-none-match': _to_str(source_if_none_match),
-            'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since),
-            'If-Match': _to_str(destination_if_match),
-            'If-None-Match': _to_str(destination_if_none_match),
-            'x-ms-lease-id': _to_str(destination_lease_id),
-            'x-ms-source-lease-id': _to_str(source_lease_id),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier),
-            'x-ms-requires-sync': _to_str(requires_sync)
-        }
-
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [BlobProperties]).copy
-
-    def abort_copy_blob(self, container_name, blob_name, copy_id,
-                        lease_id=None, timeout=None):
-        '''
-         Aborts a pending copy_blob operation, and leaves a destination blob
-         with zero length and full metadata.
-
-         :param str container_name:
-             Name of destination container.
-         :param str blob_name:
-             Name of destination blob.
-         :param str copy_id:
-             Copy identifier provided in the copy.id of the original
-             copy_blob operation.
-         :param str lease_id:
-             Required if the destination blob has an active infinite lease.
-         :param int timeout:
-             The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
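A matching sketch for aborting: the copy id comes from the CopyProperties returned by copy_blob (placeholder names as before):

    copy = service.copy_blob(
        'dest-container', 'dest-blob',
        'https://otheraccount.blob.core.windows.net/mycontainer/myblob?<sas>')
    if copy.status == 'pending':
        # Leaves the destination blob with zero length and full metadata.
        service.abort_copy_blob('dest-container', 'dest-blob', copy.id)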
-    def delete_blob(self, container_name, blob_name, snapshot=None,
-                    lease_id=None, delete_snapshots=None,
-                    if_modified_since=None, if_unmodified_since=None,
-                    if_match=None, if_none_match=None, timeout=None):
-        '''
-        Marks the specified blob or snapshot for deletion.
-        The blob is later deleted during garbage collection.
-
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the Delete
-        Blob operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
-        and retains the blob or snapshot for the specified number of days.
-        After that period, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob or snapshot is accessible through the List Blobs API by specifying the include=Include.Deleted option.
-        A soft-deleted blob or snapshot can be restored using the Undelete API.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to delete.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots:
-            Required if the blob has associated snapshots.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-delete-snapshots': _to_str(delete_snapshots),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        request.query = {
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout)
-        }
-
-        self._perform_request(request)
-
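A sketch of deleting a blob together with its snapshots, which the docstring notes is required when snapshots exist; DeleteSnapshot is the models enum referenced above (import path assumed):

    from azure.multiapi.storage.v2018_11_09.blob.models import DeleteSnapshot

    # Delete the base blob and all of its snapshots in one call.
    service.delete_blob('mycontainer', 'myblob',
                        delete_snapshots=DeleteSnapshot.Include)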
-    def undelete_blob(self, container_name, blob_name, timeout=None):
-        '''
-        The undelete Blob operation restores the contents and metadata of a soft-deleted blob or snapshot.
-        Attempting to undelete a blob or snapshot that is not soft-deleted will succeed without any changes.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'undelete',
-            'timeout': _int_to_str(timeout)
-        }
-
-        self._perform_request(request)
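And the soft-delete round trip, assuming a delete retention policy is enabled on the account as described in delete_blob:

    service.delete_blob('mycontainer', 'myblob')    # soft-deleted, retained for N days
    service.undelete_blob('mycontainer', 'myblob')  # restores contents and metadata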
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/blockblobservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/blockblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/blockblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/blockblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1199 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from io import (
-    BytesIO
-)
-from os import (
-    path,
-)
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _to_str,
-    _int_to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_VALUE_SHOULD_BE_STREAM
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _get_data_bytes_or_stream_only,
-    _add_metadata_headers,
-)
-from ..common._serialization import (
-    _len_plus
-)
-from ._deserialization import (
-    _convert_xml_to_block_list,
-    _parse_base_properties,
-)
-from ._encryption import (
-    _encrypt_blob,
-    _generate_blob_encryption_data,
-)
-from ._serialization import (
-    _convert_block_list_to_xml,
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import (
-    _BlockBlobChunkUploader,
-    _upload_blob_chunks,
-    _upload_blob_substream_blocks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-)
-
-
-class BlockBlobService(BaseBlobService):
-    '''
-    Block blobs let you upload large blobs efficiently. Block blobs are composed
-    of blocks, each of which is identified by a block ID. You create or modify a
-    block blob by writing a set of blocks and committing them by their block IDs.
-    Each block can be a different size, up to a maximum of 100 MB, and a block blob
-    can include up to 50,000 blocks. The maximum size of a block blob is therefore
-    approximately 4.75 TB (100 MB X 50,000 blocks). If you are writing a block
-    blob that is no more than 64 MB in size, you can upload it in its entirety with
-    a single write operation; see create_blob_from_bytes.
-
-    :ivar int MAX_SINGLE_PUT_SIZE:
-        The largest size upload supported in a single put call. This is used by
-        the create_blob_from_* methods if the content length is known and is less
-        than this value.
-    :ivar int MAX_BLOCK_SIZE:
-        The size of the blocks put by create_blob_from_* methods if the content
-        length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks
-        may be put. The maximum block size the service supports is 100 MB.
-    :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD:
-        The minimum block size at which the memory-optimized block upload
-        algorithm is considered. This algorithm is only applicable to the create_blob_from_file and
-        create_blob_from_stream methods and will prevent the full buffering of blocks.
-        In addition to the block size, ContentMD5 validation and Encryption must be disabled as
-        these options require the blocks to be buffered.
-    '''
-
-    MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024
-    MAX_BLOCK_SIZE = 4 * 1024 * 1024
-    MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1
-
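The "approximately 4.75 TB" figure in the class docstring follows directly from the service limits these constants mirror; a quick check of the arithmetic:

    max_service_block = 100 * 1024 * 1024   # 100 MB maximum block size
    max_blob = 50000 * max_service_block    # 50,000 blocks per blob
    print(max_blob / 1024.0 ** 4)           # ~4.77 TiB, the documented ~4.75 TB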
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None,
-                 request_session=None, connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given, or if a custom
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-            If neither account key nor sas token is specified, anonymous access
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign. If neither is
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will
-            override all other parameters besides connection string and request
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        self.blob_type = _BlobTypes.BlockBlob
-        super(BlockBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def put_block(self, container_name, blob_name, block, block_id,
-                  validate_content=False, lease_id=None, timeout=None):
-        '''
-        Creates a new block to be committed as part of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob.
-        :param block:
-            Content of the block.
-        :type block: io.IOBase or bytes
-        :param str block_id:
-            A valid Base64 string value that identifies the block. Prior to
-            encoding, the string must be less than or equal to 64 bytes in size.
-            For a given blob, the length of the value specified for the blockid
-            parameter must be the same size for each block. Note that the Base64
-            string must be URL-encoded.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        self._put_block(
-            container_name,
-            blob_name,
-            block,
-            block_id,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            timeout=timeout
-        )
-
-    def put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None):
-        '''
-        Writes a blob by specifying the list of block IDs that make up the blob.
-        In order to be written as part of a blob, a block must have been
-        successfully written to the server in a prior Put Block operation.
-
-        You can call Put Block List to update a blob by uploading only those
-        blocks that have changed, then committing the new and existing blocks
-        together. You can do this by specifying whether to commit a block from
-        the committed block list or from the uncommitted block list, or to commit
-        the most recently uploaded version of the block, whichever list it may
-        belong to.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param block_list:
-            A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block ids and block state.
-        :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`)
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the block list content. The storage
-            service checks the hash of the block list content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this check is associated with
-            the block list content, and not with the content of the blob itself.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._put_block_list(
-            container_name,
-            blob_name,
-            block_list,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
-
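A sketch of the stage-then-commit workflow these two methods implement, using the placeholder `service` client from earlier; block ids are passed as plain equal-length strings, since the SDK base64-encodes them on the wire (see _encode_base64 in the imports above):

    from azure.multiapi.storage.v2018_11_09.blob.models import BlobBlock

    blocks = []
    for i, chunk in enumerate([b'hello ', b'world']):
        block_id = 'block-{0:06d}'.format(i)  # same length for every block
        service.put_block('mycontainer', 'myblob', chunk, block_id)
        blocks.append(BlobBlock(id=block_id))
    # Nothing is readable until the uncommitted blocks are committed:
    service.put_block_list('mycontainer', 'myblob', blocks)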
-    def get_block_list(self, container_name, blob_name, snapshot=None,
-                       block_list_type=None, lease_id=None, timeout=None):
-        '''
-        Retrieves the list of blocks that have been uploaded as part of a
-        block blob. There are two block lists maintained for a blob:
-            Committed Block List:
-                The list of blocks that have been successfully committed to a
-                given blob with Put Block List.
-            Uncommitted Block List:
-                The list of blocks that have been uploaded for a blob using
-                Put Block, but that have not yet been committed. These blocks
-                are stored in Azure in association with a blob, but do not yet
-                form part of the blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            An opaque DateTime value that, when present, specifies the blob
-            snapshot whose block list to retrieve.
-        :param str block_list_type:
-            Specifies whether to return the list of committed blocks, the list
-            of uncommitted blocks, or both lists together. Valid values are:
-            committed, uncommitted, or all.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: list committed and/or uncommitted blocks for Block Blob
-        :rtype: :class:`~azure.storage.blob.models.BlobBlockList`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'snapshot': _to_str(snapshot),
-            'blocklisttype': _to_str(block_list_type),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {'x-ms-lease-id': _to_str(lease_id)}
-
-        return self._perform_request(request, _convert_xml_to_block_list)
-
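A sketch of inspecting both lists mid-upload; committed_blocks and uncommitted_blocks follow the BlobBlockList return type named in the docstring:

    block_list = service.get_block_list('mycontainer', 'myblob',
                                        block_list_type='all')
    for block in block_list.committed_blocks:
        print('committed:', block.id, block.size)
    for block in block_list.uncommitted_blocks:
        print('uncommitted:', block.id, block.size)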
-    def put_block_from_url(self, container_name, blob_name, copy_source_url, block_id,
-                           source_range_start=None, source_range_end=None,
-                           source_content_md5=None, lease_id=None, timeout=None):
-        """
-        Creates a new block to be committed as part of a blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob.
-        :param str copy_source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
-            shared access signature attached.
-        :param int source_range_start:
-            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
-        :param int source_range_end:
-            This indicates the end of the range of bytes (inclusive) that has to be taken from the copy source.
-        :param str block_id:
-            A valid Base64 string value that identifies the block. Prior to
-            encoding, the string must be less than or equal to 64 bytes in size.
-            For a given blob, the length of the value specified for the blockid
-            parameter must be the same size for each block. Note that the Base64
-            string must be URL-encoded.
-        :param str source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source_url', copy_source_url)
-        _validate_not_none('block_id', block_id)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'block',
-            'blockid': _encode_base64(_to_str(block_id)),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-copy-source': copy_source_url,
-            'x-ms-source-content-md5': source_content_md5,
-        }
-        _validate_and_format_range_headers(
-            request,
-            source_range_start,
-            source_range_end,
-            start_range_required=False,
-            end_range_required=False,
-            range_header_name="x-ms-source-range"
-        )
-
-        self._perform_request(request)
-
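A sketch of staging one 4 MB block server-side from a SAS-authenticated source (placeholder URL); the range bounds are inclusive per the docstring:

    service.put_block_from_url(
        'mycontainer', 'myblob',
        copy_source_url='https://otheraccount.blob.core.windows.net/src/srcblob?<sas>',
        block_id='block-000000',
        source_range_start=0,
        source_range_end=4 * 1024 * 1024 - 1)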
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
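A sketch of the chunked upload with progress reporting (placeholder file path):

    def progress(current, total):
        # total is the file size taken via path.getsize above
        print('{0}/{1} bytes uploaded'.format(current, total))

    service.create_blob_from_path('mycontainer', 'myblob', '/tmp/data.bin',
                                  max_connections=4,
                                  progress_callback=progress)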
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, use_byte_buffer=False):
-        '''
-        Creates a new blob from a file/stream, or updates the content of
-        an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB. Note that parallel upload requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :param bool use_byte_buffer:
-            If True, this will force usage of the original full block buffering upload path.
-            By default, this value is False and will employ a memory-efficient,
-            streaming upload algorithm under the following conditions:
-            The provided stream is seekable, 'require_encryption' is False, and
-            MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD.
-            One should consider the drawbacks of using this approach. In order to achieve
-            memory-efficiency, an IOBase stream or file-like object is segmented into logical blocks
-            using a SubStream wrapper. In order to read the correct data, each SubStream must acquire
-            a lock so that it can safely seek to the right position on the shared, underlying stream.
-            If max_connections > 1, the concurrency will result in a considerable amount of seeking on
-            the underlying stream. For the most common inputs such as a file-like stream object, seeking
-            is an inexpensive operation and this is not much of a concern. However, for other variants of streams
-            this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking
-            with your input stream.
-            The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of
-            seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        # Adjust count to include padding if we are expected to encrypt.
-        adjusted_count = count
-        if (self.key_encryption_key is not None) and (adjusted_count is not None):
-            adjusted_count += (16 - (count % 16))
-
-        # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE
-        if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE):
-            if progress_callback:
-                progress_callback(0, count)
-
-            data = stream.read(count)
-            resp = self._put_blob(
-                container_name=container_name,
-                blob_name=blob_name,
-                blob=data,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout)
-
-            if progress_callback:
-                progress_callback(count, count)
-
-            return resp
-        else:  # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls
-            cek, iv, encryption_data = None, None, None
-
-            use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \
-                                       self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \
-                                       hasattr(stream, 'seekable') and not stream.seekable() or \
-                                       not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
-
-            if use_original_upload_path:
-                if self.key_encryption_key:
-                    cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-                block_ids = _upload_blob_chunks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                    content_encryption_key=cek,
-                    initialization_vector=iv
-                )
-            else:
-                block_ids = _upload_blob_substream_blocks(
-                    blob_service=self,
-                    container_name=container_name,
-                    blob_name=blob_name,
-                    blob_size=count,
-                    block_size=self.MAX_BLOCK_SIZE,
-                    stream=stream,
-                    max_connections=max_connections,
-                    progress_callback=progress_callback,
-                    validate_content=validate_content,
-                    lease_id=lease_id,
-                    uploader_class=_BlockBlobChunkUploader,
-                    timeout=timeout,
-                )
-
-            return self._put_block_list(
-                container_name=container_name,
-                blob_name=blob_name,
-                block_list=block_ids,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                encryption_data=encryption_data
-            )
-
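A sketch of the streaming path: for a seekable stream larger than MAX_SINGLE_PUT_SIZE, with validate_content off, no encryption, and a block size at or above MIN_LARGE_BLOCK_UPLOAD_THRESHOLD, the method takes the memory-efficient _upload_blob_substream_blocks branch above:

    import os

    service.MAX_BLOCK_SIZE = 8 * 1024 * 1024  # >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD
    size = os.path.getsize('/tmp/big.bin')    # assume > MAX_SINGLE_PUT_SIZE (64 MB)
    with open('/tmp/big.bin', 'rb') as stream:
        service.create_blob_from_stream('mycontainer', 'myblob', stream,
                                        count=size, max_connections=4)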
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_not_none('index', index)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            lease_id=lease_id,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            use_byte_buffer=True
-        )
-
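For orientation, a minimal sketch of how this legacy track1 method was typically called; the account credentials, container, and blob names below are placeholders:

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    # Placeholder credentials; the client chunks the payload and reports
    # progress through the callback described above.
    service = BlockBlobService(account_name='myaccount', account_key='<key>')

    def report(current, total):
        # current = bytes transferred so far, total = blob size (or None)
        print('{0}/{1}'.format(current, total))

    props = service.create_blob_from_bytes(
        'mycontainer', 'myblob', b'hello world',
        validate_content=True,        # per-chunk Content-MD5 on the wire
        progress_callback=report)
    print(props.etag, props.last_modified)
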
-    def create_blob_from_text(
-            self, container_name, blob_name, text, encoding='utf-8',
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None):
-        '''
-        Creates a new blob from str/unicode, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str text:
-            Text to upload to the blob.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :return: ETag and last modified properties for the Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        return self.create_blob_from_bytes(
-            container_name=container_name,
-            blob_name=blob_name,
-            blob=text,
-            index=0,
-            count=len(text),
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout)
-
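A companion sketch for the text variant, pairing an explicit encoding with matching content settings (ContentSettings is defined in the models module shown later in this diff; names are placeholders):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import ContentSettings

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    # The text is encoded client-side before the byte-based upload path runs.
    service.create_blob_from_text(
        'mycontainer', 'notes.txt', u'caf\xe9 menu',
        encoding='utf-16',
        content_settings=ContentSettings(content_type='text/plain; charset=utf-16'))
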
-    def set_standard_blob_tier(
-            self, container_name, blob_name, standard_blob_tier, timeout=None):
-        '''
-        Sets the block blob tier on the blob. This API is only supported for block blobs on standard storage accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('standard_blob_tier', standard_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(standard_blob_tier)
-        }
-
-        self._perform_request(request)
-
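For illustration, moving a block blob to the cool tier using the StandardBlobTier constants defined later in this diff (account and names are placeholders):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import StandardBlobTier

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    # Issues the PUT ?comp=tier request assembled above.
    service.set_standard_blob_tier('mycontainer', 'myblob', StandardBlobTier.Cool)
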
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None, source_if_modified_since=None,
-                  source_if_unmodified_since=None, source_if_match=None,
-                  source_if_none_match=None, destination_if_modified_since=None,
-                  destination_if_unmodified_since=None, destination_if_match=None,
-                  destination_if_none_match=None, destination_lease_id=None,
-                  source_lease_id=None, timeout=None, requires_sync=None):
-
-        '''
-        Copies a blob. This operation returns a copy operation
-        properties object. The copy operation may be configured to either be an
-        asynchronous, best-effort operation, or a synchronous operation.
-
-        The source must be a block blob if requires_sync is true. Any existing
-        destination blob will be overwritten. The destination blob cannot be
-        modified while a copy operation is in progress.
-
-        When copying from a block blob, all committed blocks and their block IDs are
-        copied. Uncommitted blocks are not copied. At the end of the copy operation,
-        the destination blob will have the same committed block count as the source.
-
-        You can call get_blob_properties on the destination blob to check the status
-        of the copy operation. The final blob will be committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param bool requires_sync:
-            Enforces that the service will not return a response until the copy is complete.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               premium_page_blob_tier=None,
-                               source_if_modified_since=source_if_modified_since,
-                               source_if_unmodified_since=source_if_unmodified_since,
-                               source_if_match=source_if_match,
-                               source_if_none_match=source_if_none_match,
-                               destination_if_modified_since=destination_if_modified_since,
-                               destination_if_unmodified_since=destination_if_unmodified_since,
-                               destination_if_match=destination_if_match,
-                               destination_if_none_match=destination_if_none_match,
-                               destination_lease_id=destination_lease_id,
-                               source_lease_id=source_lease_id, timeout=timeout,
-                               incremental_copy=False,
-                               requires_sync=requires_sync)
-
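The asynchronous copy-and-poll pattern the docstring describes looks roughly like this; the source URL and names are placeholders:

    import time

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    copy = service.copy_blob(
        'destcontainer', 'destblob',
        'https://myaccount.blob.core.windows.net/mycontainer/myblob')
    while copy.status == 'pending':
        time.sleep(2)   # poll the destination blob for copy progress
        copy = service.get_blob_properties(
            'destcontainer', 'destblob').properties.copy
    print(copy.status, copy.progress)
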
-    # -----Helper methods------------------------------------
-    def _put_blob(self, container_name, blob_name, blob, content_settings=None,
-                  metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-                  if_unmodified_since=None, if_match=None, if_none_match=None,
-                  timeout=None):
-        '''
-        Creates a blob or updates an existing blob.
-
-        See create_blob_from_* for high level
-        functions that handle the creation and upload of large blobs with
-        automatic chunking and progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as bytes (size < 64MB). For larger sizes, you
-            must call put_block and put_block_list to set the content of the blob.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the blob content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire when using http instead of https, as https (the
-            default) already validates. Note that this MD5 hash is not stored with the
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the new Block Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        blob = _get_data_bytes_only('blob', blob)
-        if self.key_encryption_key:
-            encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key)
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-        request.body = blob
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _put_block(self, container_name, blob_name, block, block_id,
-                   validate_content=False, lease_id=None, timeout=None):
-        '''
-        See put_block for more details. This helper method allows
-        encryption and other special behaviors that are prohibited in the
-        public version of this function, because here they are handled
-        safely by the library.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block', block)
-        _validate_not_none('block_id', block_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'block',
-            'blockid': _encode_base64(_to_str(block_id)),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id)
-        }
-        request.body = _get_data_bytes_or_stream_only('block', block)
-        if hasattr(request.body, 'read'):
-            if _len_plus(request.body) is None:
-                try:
-                    data = b''
-                    for chunk in iter(lambda: request.body.read(4096), b""):
-                        data += chunk
-                    request.body = data
-                except AttributeError:
-                    raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body'))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
-    def _put_block_list(
-            self, container_name, blob_name, block_list, content_settings=None,
-            metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None,
-            timeout=None, encryption_data=None):
-        '''
-        See put_block_list for more details. This helper method allows
-        encryption and other special behaviors that are prohibited in the
-        public version of this function, because here they are handled
-        safely by the library.
-        :param str encryption_data:
-            A JSON formatted string containing the encryption metadata generated for this 
-            blob if it was encrypted all at once upon upload. This should only be passed
-            in by internal methods.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('block_list', block_list)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'blocklist',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-        request.body = _get_request_body(
-            _convert_block_list_to_xml(block_list))
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
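The two private helpers above back the public put_block/put_block_list pair; staged manually, the flow looks roughly like this (ids and names are illustrative):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import BlobBlock

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    blocks = []
    for i, chunk in enumerate((b'part-one', b'part-two')):
        block_id = '{0:08d}'.format(i)   # base64-encoded by the client
        service.put_block('mycontainer', 'big.bin', chunk, block_id)
        blocks.append(BlobBlock(id=block_id))
    # Nothing is readable until the block list is committed.
    service.put_block_list('mycontainer', 'big.bin', blocks)
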
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/models.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/models.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,825 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Container(object):
-    '''
-    Blob container class. 
-    
-    :ivar str name: 
-        The name of the container.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the container as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list containers operation. If this parameter was specified but the 
-        container has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar ContainerProperties properties:
-        System properties for the container.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or ContainerProperties()
-        self.metadata = metadata
-
-
-class ContainerProperties(object):
-    '''
-    Blob container's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the container was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar LeaseProperties lease:
-        Stores all the lease information for the container.
-    :ivar bool has_immutability_policy:
-        Represents whether the container has an immutability policy.
-    :ivar bool has_legal_hold:
-        Represents whether the container has a legal hold.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.lease = LeaseProperties()
-        self.public_access = None
-        self.has_immutability_policy = None
-        self.has_legal_hold = None
-
-
-class Blob(object):
-    '''
-    Blob class.
-    
-    :ivar str name:
-        Name of blob.
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    :ivar content:
-        Blob content.
-    :vartype content: str or bytes
-    :ivar BlobProperties properties:
-        Stores all the system properties for the blob.
-    :ivar metadata:
-        Name-value pairs associated with the blob as metadata.
-    :ivar bool deleted:
-        Specify whether the blob was soft deleted.
-        In other words, if the blob is being retained by the delete retention policy,
-        this field would be True. The blob could be undeleted or it will be garbage collected after the specified
-        time period.
-    '''
-
-    def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False):
-        self.name = name
-        self.snapshot = snapshot
-        self.content = content
-        self.properties = props or BlobProperties()
-        self.metadata = metadata
-        self.deleted = deleted
-
-
-class BlobProperties(object):
-    '''
-    Blob Properties
-    
-    :ivar str blob_type:
-        String indicating this blob's type.
-    :ivar datetime last_modified:
-        A datetime object representing the last time the blob was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire blob was requested, 
-        the length of the blob in bytes. If a subset of the blob was requested, the 
-        length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client 
-        requested a subset of the blob.
-    :ivar int append_blob_committed_block_count:
-        (For Append Blobs) Number of committed blocks in the blob.
-    :ivar int page_blob_sequence_number:
-        (For Page Blobs) Sequence number for page blob used for coordinating
-        concurrent writes.
-    :ivar bool server_encrypted:
-        Set to true if the blob is encrypted on the server.
-    :ivar ~azure.storage.blob.models.CopyProperties copy:
-        Stores all the copy properties for the blob.
-    :ivar ~azure.storage.blob.models.ContentSettings content_settings:
-        Stores all the content settings for the blob.
-    :ivar ~azure.storage.blob.models.LeaseProperties lease:
-        Stores all the lease information for the blob.
-    :ivar StandardBlobTier blob_tier:
-        Indicates the access tier of the blob. The hot tier is optimized
-        for storing data that is accessed frequently. The cool storage tier
-        is optimized for storing data that is infrequently accessed and stored
-        for at least a month. The archive tier is optimized for storing
-        data that is rarely accessed and stored for at least six months
-        with flexible latency requirements.
-    :ivar datetime blob_tier_change_time:
-        Indicates when the access tier was last changed.
-    :ivar bool blob_tier_inferred:
-        Indicates whether the access tier was inferred by the service.
-        If false, it indicates that the tier was set explicitly.
-    :ivar datetime deleted_time:
-        A datetime object representing the time at which the blob was deleted.
-    :ivar int remaining_retention_days:
-        The number of days that the blob will be retained before being permanently deleted by the service.
-    :ivar datetime creation_time:
-        Indicates when the blob was created, in UTC.
-    '''
-
-    def __init__(self):
-        self.blob_type = None
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.append_blob_committed_block_count = None
-        self.page_blob_sequence_number = None
-        self.server_encrypted = None
-        self.copy = CopyProperties()
-        self.content_settings = ContentSettings()
-        self.lease = LeaseProperties()
-        self.blob_tier = None
-        self.blob_tier_change_time = None
-        self.blob_tier_inferred = False
-        self.deleted_time = None
-        self.remaining_retention_days = None
-        self.creation_time = None
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a blob.
-    
-    :ivar str content_type:
-        The content type specified for the blob. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If the content_encoding has previously been set
-        for the blob, that value is stored.
-    :ivar str content_language:
-        If the content_language has previously been set
-        for the blob, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the blob, that value is stored.
-    :ivar str cache_control:
-        If the cache_control has previously been set for
-        the blob, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the blob, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-blob-cache-control': _to_str(self.cache_control),
-            'x-ms-blob-content-type': _to_str(self.content_type),
-            'x-ms-blob-content-disposition': _to_str(self.content_disposition),
-            'x-ms-blob-content-md5': _to_str(self.content_md5),
-            'x-ms-blob-content-encoding': _to_str(self.content_encoding),
-            'x-ms-blob-content-language': _to_str(self.content_language),
-        }
-
-
-class CopyProperties(object):
-    '''
-    Blob Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy Blob operation where this blob
-        was the destination blob. This header does not appear if this blob has never
-        been the destination in a Copy Blob operation, or if this blob has been
-        modified after a concluded Copy Blob operation using Set Blob Properties,
-        Put Blob, or Put Block List.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source blob used in the last attempted
-        Copy Blob operation where this blob was the destination blob. This header does not
-        appear if this blob has never been the destination in a Copy Blob operation, or if
-        this blob has been modified after a concluded Copy Blob operation using
-        Set Blob Properties, Put Blob, or Put Block List.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy Blob.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy Blob operation where this blob was the destination blob. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy Blob operation where this blob was the
-        destination blob. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes the
-        cause of a fatal or non-fatal copy operation failure.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class LeaseProperties(object):
-    '''
-    Blob Lease Properties.
-    
-    :ivar str status:
-        The lease status of the blob.
-        Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the blob.
-        Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a blob is leased, specifies whether the lease is of infinite or fixed duration.
-    '''
-
-    def __init__(self):
-        self.status = None
-        self.state = None
-        self.duration = None
-
-
-class BlobPrefix(object):
-    '''
-    BlobPrefix objects may be returned in the blob list when
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is 
-    used with a delimiter. Prefixes can be thought of as virtual blob directories.
-    
-    :ivar str name: The name of the blob prefix.
-    '''
-
-    def __init__(self):
-        self.name = None
-
-
-class BlobBlockState(object):
-    '''Block blob block types.'''
-
-    Committed = 'Committed'
-    '''Committed blocks.'''
-
-    Latest = 'Latest'
-    '''Latest blocks.'''
-
-    Uncommitted = 'Uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class BlobBlock(object):
-    '''
-    BlockBlob Block class.
-    
-    :ivar str id:
-        Block id.
-    :ivar str state:
-        Block state.
-        Possible values: committed|uncommitted
-    :ivar int size:
-        Block size in bytes.
-    '''
-
-    def __init__(self, id=None, state=BlobBlockState.Latest):
-        self.id = id
-        self.state = state
-
-    def _set_size(self, size):
-        self.size = size
-
-
-class BlobBlockList(object):
-    '''
-    Blob Block List class.
-   
-    :ivar committed_blocks:
-        List of committed blocks.
-    :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    :ivar uncommitted_blocks:
-        List of uncommitted blocks.
-    :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
-    '''
-
-    def __init__(self):
-        self.committed_blocks = list()
-        self.uncommitted_blocks = list()
-
-
-class PageRange(object):
-    '''
-    Page Range for page blob.
-    
-    :ivar int start:
-        Start of page range in bytes.
-    :ivar int end:
-        End of page range in bytes.
-    :ivar bool is_cleared:
-        Indicates if a page range is cleared or not. Only applicable
-        for get_page_range_diff API.
-    '''
-
-    def __init__(self, start=None, end=None, is_cleared=False):
-        self.start = start
-        self.end = end
-        self.is_cleared = is_cleared
-
-
-class ResourceProperties(object):
-    '''
-    Base response for a resource request.
-    
-    :ivar str etag:
-        Opaque etag value that can be used to check if resource
-        has been modified.
-    :ivar datetime last_modified:
-        Datetime for last time resource was modified.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-
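ResourceProperties is what makes the if_match conditional headers documented earlier practical; a sketch of etag-based optimistic concurrency (names are placeholders):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    props = service.create_blob_from_text('mycontainer', 'config.json', u'{}')
    # Fails with 412 Precondition Failed if another writer raced this update.
    service.create_blob_from_text(
        'mycontainer', 'config.json', u'{"v": 2}',
        if_match=props.etag)
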
-
-class AppendBlockProperties(ResourceProperties):
-    '''
-    Response for an append block request.
-    
-    :ivar int append_offset:
-        Position to start next append.
-    :ivar int committed_block_count:
-        Number of committed append blocks.
-    '''
-
-    def __init__(self):
-        super(AppendBlockProperties, self).__init__()
-        self.append_offset = None
-        self.committed_block_count = None
-
-
-class PageBlobProperties(ResourceProperties):
-    '''
-    Response for a page request.
-    
-    :ivar int sequence_number:
-        Identifier for page blobs to help handle concurrent writes.
-    '''
-
-    def __init__(self):
-        super(PageBlobProperties, self).__init__()
-        self.sequence_number = None
-
-
-class PublicAccess(object):
-    '''
-    Specifies whether data in the container may be accessed publicly and the level of access.
-    '''
-
-    OFF = 'off'
-    '''
-    Specifies that there is no public read access for either the container or the blobs within it.
-    Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
-    '''
-
-    Blob = 'blob'
-    '''
-    Specifies public read access for blobs. Blob data within this container can be read 
-    via anonymous request, but container data is not available. Clients cannot enumerate 
-    blobs within the container via anonymous request.
-    '''
-
-    Container = 'container'
-    '''
-    Specifies full public read access for container and blob data. Clients can enumerate 
-    blobs within the container via anonymous request, but cannot enumerate containers 
-    within the storage account.
-    '''
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the blob has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the base blob and all of its snapshots.
-    '''
-
-    Only = 'only'
-    '''
-    Delete only the blob's snapshots and not the blob itself.
-    '''
-
-
-class BlockListType(object):
-    '''
-    Specifies whether to return the list of committed blocks, the list of uncommitted 
-    blocks, or both lists together.
-    '''
-
-    All = 'all'
-    '''Both committed and uncommitted blocks.'''
-
-    Committed = 'committed'
-    '''Committed blocks.'''
-
-    Uncommitted = 'uncommitted'
-    '''Uncommitted blocks.'''
-
-
-class SequenceNumberAction(object):
-    '''Sequence number actions.'''
-
-    Increment = 'increment'
-    '''
-    Increments the value of the sequence number by 1. If specifying this option, 
-    do not include the x-ms-blob-sequence-number header.
-    '''
-
-    Max = 'max'
-    '''
-    Sets the sequence number to be the higher of the value included with the 
-    request and the value currently stored for the blob.
-    '''
-
-    Update = 'update'
-    '''Sets the sequence number to the value included with the request.'''
-
-
-class _LeaseActions(object):
-    '''Actions for a lease.'''
-
-    Acquire = 'acquire'
-    '''Acquire the lease.'''
-
-    Break = 'break'
-    '''Break the lease.'''
-
-    Change = 'change'
-    '''Change the lease ID.'''
-
-    Release = 'release'
-    '''Release the lease.'''
-
-    Renew = 'renew'
-    '''Renew the lease.'''
-
-
-class _BlobTypes(object):
-    '''Blob type options.'''
-
-    AppendBlob = 'AppendBlob'
-    '''Append blob type.'''
-
-    BlockBlob = 'BlockBlob'
-    '''Block blob type.'''
-
-    PageBlob = 'PageBlob'
-    '''Page blob type.'''
-
-
-class Include(object):
-    '''
-    Specifies the datasets to include in the blob list response.
-
-    :ivar ~azure.storage.blob.models.Include Include.COPY: 
-        Specifies that metadata related to any current or previous Copy Blob operation 
-        should be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.METADATA: 
-        Specifies that metadata be returned in the response.
-    :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: 
-        Specifies that snapshots should be included in the enumeration.
-    :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: 
-        Specifies that blobs for which blocks have been uploaded, but which have not 
-        been committed using Put Block List, be included in the response.
-    :ivar ~azure.storage.blob.models.Include Include.DELETED:
-        Specifies that deleted blobs should be returned in the response.
-    '''
-
-    def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
-                 copy=False, deleted=False, _str=None):
-        '''
-        :param bool snapshots:
-             Specifies that snapshots should be included in the enumeration.
-        :param bool metadata:
-            Specifies that metadata be returned in the response.
-        :param bool uncommitted_blobs:
-            Specifies that blobs for which blocks have been uploaded, but which have 
-            not been committed using Put Block List, be included in the response.
-        :param bool copy: 
-            Specifies that metadata related to any current or previous Copy Blob 
-            operation should be included in the response.
-        :param bool deleted:
-            Specifies that deleted blobs should be returned in the response.
-        :param str _str: 
-            A string representing the includes.
-        '''
-        if not _str:
-            _str = ''
-        components = _str.split(',')
-        self.snapshots = snapshots or ('snapshots' in components)
-        self.metadata = metadata or ('metadata' in components)
-        self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
-        self.copy = copy or ('copy' in components)
-        self.deleted = deleted or ('deleted' in components)
-
-    def __or__(self, other):
-        return Include(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Include(_str=str(self) + str(other))
-
-    def __str__(self):
-        include = (('snapshots,' if self.snapshots else '') +
-                   ('metadata,' if self.metadata else '') +
-                   ('uncommittedblobs,' if self.uncommitted_blobs else '') +
-                   ('copy,' if self.copy else '') +
-                   ('deleted,' if self.deleted else ''))
-        return include.rstrip(',')
-
-
-Include.COPY = Include(copy=True)
-Include.METADATA = Include(metadata=True)
-Include.SNAPSHOTS = Include(snapshots=True)
-Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
-Include.DELETED = Include(deleted=True)
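A sketch of how these flags drive enumeration; the constructor form is shown because it maps directly onto the comma-separated include string (service and container are placeholders):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import Include

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    opts = Include(snapshots=True, metadata=True)
    print(str(opts))   # 'snapshots,metadata'
    for blob in service.list_blobs('mycontainer', include=opts):
        print(blob.name, blob.snapshot, blob.metadata)
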
-
-
-class BlobPermissions(object):
-    '''
-    BlobPermissions class to be used with 
-    :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
-
-    :ivar BlobPermissions BlobPermissions.ADD:
-        Add a block to an append blob.
-    :ivar BlobPermissions BlobPermissions.CREATE:
-        Write a new blob, snapshot a blob, or copy a blob to a new blob.
-    :ivar BlobPermissions BlobPermissions.DELETE:
-        Delete the blob.
-    :ivar BlobPermissions BlobPermissions.READ:
-        Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
-    :ivar BlobPermissions BlobPermissions.WRITE:
-        Create or write content, properties, metadata, or block list. Snapshot or lease 
-        the blob. Resize the blob (page blob only). Use the blob as the destination of a 
-        copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, add=False, create=False, write=False,
-                 delete=False, _str=None):
-        '''    
-        :param bool read:
-            Read the content, properties, metadata and block list. Use the blob as 
-            the source of a copy operation.
-        :param bool add:
-            Add a block to an append blob.
-        :param bool create:
-            Write a new blob, snapshot a blob, or copy a blob to a new blob.
-        :param bool write: 
-            Create or write content, properties, metadata, or block list. Snapshot 
-            or lease the blob. Resize the blob (page blob only). Use the blob as the 
-            destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the blob.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return BlobPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-BlobPermissions.ADD = BlobPermissions(add=True)
-BlobPermissions.CREATE = BlobPermissions(create=True)
-BlobPermissions.DELETE = BlobPermissions(delete=True)
-BlobPermissions.READ = BlobPermissions(read=True)
-BlobPermissions.WRITE = BlobPermissions(write=True)
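For example, a read-only, one-hour SAS for a single blob, assuming the generate_blob_shared_access_signature/make_blob_url pair on the track1 service client (names are placeholders):

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import BlobPermissions

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    token = service.generate_blob_shared_access_signature(
        'mycontainer', 'myblob',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    # A shareable URL carrying the SAS query string.
    print(service.make_blob_url('mycontainer', 'myblob', sas_token=token))
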
-
-
-class ContainerPermissions(object):
-    '''
-    ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
-    API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. 
-
-    :ivar ContainerPermissions ContainerPermissions.DELETE:
-        Delete any blob in the container. Note: You cannot grant permissions to 
-        delete a container with a container SAS. Use an account SAS instead.
-    :ivar ContainerPermissions ContainerPermissions.LIST:
-        List blobs in the container.
-    :ivar ContainerPermissions ContainerPermissions.READ:
-        Read the content, properties, metadata or block list of any blob in the 
-        container. Use any blob in the container as the source of a copy operation.
-    :ivar ContainerPermissions ContainerPermissions.WRITE:
-        For any blob in the container, create or write content, properties, 
-        metadata, or block list. Snapshot or lease the blob. Resize the blob 
-        (page blob only). Use the blob as the destination of a copy operation 
-        within the same account. Note: You cannot grant permissions to read or 
-        write container properties or metadata, nor to lease a container, with 
-        a container SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, add=False, create=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata or block list of any blob in the 
-            container. Use any blob in the container as the source of a copy operation.
-        :param bool add:
-            Add a block to any append blob in the container.
-        :param bool create:
-            Write a new blob to the container, snapshot any blob in the container, or copy a blob to
-            a new blob in the container. Note: You cannot grant permissions to create a container
-            with a container SAS. Use an account SAS to create a container instead.
-        :param bool write:
-            For any blob in the container, create or write content, properties, 
-            metadata, or block list. Snapshot or lease the blob. Resize the blob 
-            (page blob only). Use the blob as the destination of a copy operation 
-            within the same account. Note: You cannot grant permissions to read or 
-            write container properties or metadata, nor to lease a container, with 
-            a container SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any blob in the container. Note: You cannot grant permissions to 
-            delete a container with a container SAS. Use an account SAS instead.
-        :param bool list: 
-            List blobs in the container.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ContainerPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-ContainerPermissions.DELETE = ContainerPermissions(delete=True)
-ContainerPermissions.LIST = ContainerPermissions(list=True)
-ContainerPermissions.READ = ContainerPermissions(read=True)
-ContainerPermissions.WRITE = ContainerPermissions(write=True)
-ContainerPermissions.ADD = ContainerPermissions(add=True)
-ContainerPermissions.CREATE = ContainerPermissions(create=True)
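Permission flags compose with | into their string form ('rl' below). A sketch of a stored access policy for set_container_acl, assuming AccessPolicy from the companion common package as in track1; the policy identifier is arbitrary:

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService
    from azure.multiapi.storage.v2018_11_09.blob.models import ContainerPermissions
    from azure.multiapi.storage.v2018_11_09.common.models import AccessPolicy

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    policy = AccessPolicy(
        permission=ContainerPermissions.READ | ContainerPermissions.LIST,  # 'rl'
        expiry=datetime.utcnow() + timedelta(days=7))
    service.set_container_acl('mycontainer', {'read-list': policy})
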
-
-
-class PremiumPageBlobTier(object):
-    '''
-    Specifies the page blob tier to set the blob to. This is only applicable to page
-    blobs on premium storage accounts.
-    Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
-    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
-    '''
-
-    P4 = 'P4'
-    ''' P4 Tier '''
-
-    P6 = 'P6'
-    ''' P6 Tier '''
-
-    P10 = 'P10'
-    ''' P10 Tier '''
-
-    P20 = 'P20'
-    ''' P20 Tier '''
-
-    P30 = 'P30'
-    ''' P30 Tier '''
-
-    P40 = 'P40'
-    ''' P40 Tier '''
-
-    P50 = 'P50'
-    ''' P50 Tier '''
-
-    P60 = 'P60'
-    ''' P60 Tier '''
-
-
-class StandardBlobTier(object):
-    '''
-    Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts.
-    '''
-
-    Archive = 'Archive'
-    ''' Archive '''
-
-    Cool = 'Cool'
-    ''' Cool '''
-
-    Hot = 'Hot'
-    ''' Hot '''
-
-
-class AccountInformation(object):
-    """
-    Holds information related to the storage account.
-
-    :ivar str sku_name:
-        Name of the storage SKU, also known as account type.
-        Example: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS, Premium_ZRS
-    :ivar str account_kind:
-        Describes the flavour of the storage account, also known as account kind.
-        Example: Storage, StorageV2, BlobStorage
-    """
-    def __init__(self):
-        self.sku_name = None
-        self.account_kind = None
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
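To round out the models file: AccountInformation above is populated by the service client's get_account_information call (assuming the track1 method of that name, available for this API version):

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    info = service.get_account_information()
    print(info.sku_name, info.account_kind)   # e.g. 'Standard_LRS', 'StorageV2'
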
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/pageblobservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/pageblobservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/pageblobservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/pageblobservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1522 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-from os import path
-
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _datetime_to_utc_string,
-    _get_content_md5,
-)
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._error import (
-    _validate_not_none,
-    _validate_type_bytes,
-    _validate_encryption_required,
-    _validate_encryption_unsupported,
-    _ERROR_VALUE_NEGATIVE,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_data_bytes_only,
-    _add_metadata_headers,
-)
-from ._deserialization import (
-    _convert_xml_to_page_ranges,
-    _parse_page_properties,
-    _parse_base_properties,
-)
-from ._encryption import _generate_blob_encryption_data
-from ._error import (
-    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
-)
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import (
-    _PageBlobChunkUploader,
-    _upload_blob_chunks,
-)
-from .baseblobservice import BaseBlobService
-from .models import (
-    _BlobTypes,
-    ResourceProperties)
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-# Keep this value in sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
-_PAGE_ALIGNMENT = 512
-
-
-class PageBlobService(BaseBlobService):
-    '''
-    Page blobs are a collection of 512-byte pages optimized for random read and
-    write operations. To create a page blob, you initialize the page blob and
-    specify the maximum size the page blob will grow. To add or update the
-    contents of a page blob, you write a page or pages by specifying an offset
-    and a range that align to 512-byte page boundaries. A write to a page blob
-    can overwrite just one page, some pages, or up to 4 MB of the page blob.
-    Writes to page blobs happen in-place and are immediately committed to the
-    blob. The maximum size for a page blob is 8 TB.
-
-    :ivar int MAX_PAGE_SIZE: 
-        The size of the pages put by create_blob_from_* methods. Smaller pages 
-        may be put if there is less data provided. The maximum page size the service 
-        supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped.
-    '''
-
-    MAX_PAGE_SIZE = 4 * 1024 * 1024
-
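-    # Editorial note, not part of the original source: writes must target
-    # 512-byte-aligned inclusive ranges. A minimal sketch for deriving a
-    # valid range for `length` bytes starting at page index `page_no`
-    # (both names are placeholders):
-    #
-    #     start_range = page_no * 512
-    #     end_range = start_range + (-(-length // 512) * 512) - 1
-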
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None,
-                 request_session=None, connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given, or if a custom 
-            domain is used with anonymous authentication.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-            If neither account key nor sas token is specified, anonymous access 
-            will be used.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign. If neither are 
-             specified, anonymous access will be used.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param str custom_domain:
-            The custom domain to use. This can be set in the Azure Portal. For 
-            example, 'www.mydomain.com'.
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        self.blob_type = _BlobTypes.PageBlob
-        super(PageBlobService, self).__init__(
-            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
-            custom_domain, request_session, connection_string, socket_timeout, token_credential)
-
-    def create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new Page Blob.
-
-        See create_blob_from_* for high level functions that handle the
-        creation and upload of large blobs with automatic chunking and
-        progress notifications.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param int content_length:
-            Required. This header specifies the maximum size
-            for the page blob, up to 8 TB. The page blob size must be aligned
-            to a 512-byte boundary.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set properties on the blob.
-        :param int sequence_number:
-            The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the new Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._create_blob(
-            container_name,
-            blob_name,
-            content_length,
-            content_settings=content_settings,
-            sequence_number=sequence_number,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
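-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: create_blob only allocates the (512-byte-aligned) page blob;
-    # content is written afterwards with update_page:
-    #
-    #     service = PageBlobService(account_name='myaccount', account_key=key)
-    #     props = service.create_blob('mycontainer', 'myblob', 1024,
-    #                                 premium_page_blob_tier=PremiumPageBlobTier.P4)
-    #     # props is a ResourceProperties carrying etag / last_modified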
-
-    def incremental_copy_blob(self, container_name, blob_name, copy_source,
-                              metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None,
-                              destination_if_match=None, destination_if_none_match=None, destination_lease_id=None,
-                              source_lease_id=None, timeout=None):
-        '''
-        Performs an incremental copy of a blob asynchronously. This operation returns a copy operation
-        properties object, including a copy ID you can use to check or abort the
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for an incremental copy operation must be a page blob.
-        Call get_blob_properties on the destination blob to check the status of the copy operation.
-        The final blob will be committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure page blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            The copy source must be a snapshot and include a valid SAS token or be public.
-            Example:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the destination blob
-            has not been modified since the specified date/time. If the destination blob
-            has been modified, the Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata,
-                               source_if_modified_since=None, source_if_unmodified_since=None,
-                               source_if_match=None, source_if_none_match=None,
-                               destination_if_modified_since=destination_if_modified_since,
-                               destination_if_unmodified_since=destination_if_unmodified_since,
-                               destination_if_match=destination_if_match,
-                               destination_if_none_match=destination_if_none_match,
-                               destination_lease_id=destination_lease_id,
-                               source_lease_id=source_lease_id, timeout=timeout,
-                               incremental_copy=True)
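-
-    # Editorial sketch, not part of the original module: the source must be a
-    # page blob *snapshot* URL that is public or carries a SAS (the URL below
-    # is a placeholder); poll the destination's copy status as described above:
-    #
-    #     src = ('https://myaccount.blob.core.windows.net/mycontainer/myblob'
-    #            '?snapshot=2018-11-09T00:00:00.0000000Z&<sas>')
-    #     copy = service.incremental_copy_blob('destcontainer', 'destblob', src)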
-
-    def update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Updates a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param bytes page:
-            Content of the page.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-
-        return self._update_page(
-            container_name,
-            blob_name,
-            page,
-            start_range,
-            end_range,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            if_sequence_number_lte=if_sequence_number_lte,
-            if_sequence_number_lt=if_sequence_number_lt,
-            if_sequence_number_eq=if_sequence_number_eq,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout
-        )
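-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: write one full page at the start of the blob; the byte range is
-    # inclusive, so 512 bytes span offsets 0-511:
-    #
-    #     service.update_page('mycontainer', 'myblob', b'\x01' * 512, 0, 511)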
-
-    def update_page_from_url(self, container_name, blob_name, start_range, end_range, copy_source_url,
-                             source_range_start, source_content_md5=None, source_if_modified_since=None,
-                             source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None,
-                             lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None,
-                             if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None,
-                             if_match=None, if_none_match=None, timeout=None):
-        """
-        Updates a range of pages to a page blob where the contents are read from a URL.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param str copy_source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
-            shared access signature attached.
-        :param int source_range_start:
-            This indicates the start of the range of bytes (inclusive) to be read from the copy source.
-            The service will read the same number of bytes as the destination range (end_range-start_range).
-        :param str source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :param str source_if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the source resource's ETag matches the value specified.
-        :param str source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the source resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the source resource does not exist, and fail the
-            operation if it does exist.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('copy_source_url', copy_source_url)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'update',
-            'x-ms-copy-source': copy_source_url,
-            'x-ms-source-content-md5': source_content_md5,
-            'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since),
-            'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since),
-            'x-ms-source-if-Match': _to_str(source_if_match),
-            'x-ms-source-if-None-Match': _to_str(source_if_none_match),
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-        _validate_and_format_range_headers(
-            request,
-            source_range_start,
-            source_range_start+(end_range-start_range),
-            range_header_name="x-ms-source-range")
-
-        return self._perform_request(request, _parse_page_properties)
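-
-    # Editorial sketch, not part of the original module: copy 512 bytes from
-    # offset 0 of a readable source blob into pages 0-511 of the destination
-    # (the source URL is a placeholder and must be public or carry a SAS):
-    #
-    #     service.update_page_from_url(
-    #         'mycontainer', 'myblob', 0, 511,
-    #         copy_source_url='https://src.blob.core.windows.net/c/b?<sas>',
-    #         source_range_start=0)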
-
-    def clear_page(
-            self, container_name, blob_name, start_range, end_range,
-            lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        Clears a range of pages.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int start_range:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param int end_range:
-            End of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-1023, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :param int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value matches the
-            value specified. If the values do not match, the Blob service fails.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
-            header to write the page only if the blob's ETag value does not
-            match the value specified. If the values are identical, the Blob
-            service fails.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'clear',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-
-        return self._perform_request(request, _parse_page_properties)
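-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: zero out the second page (offsets 512-1023) without uploading
-    # any data:
-    #
-    #     service.clear_page('mycontainer', 'myblob', 512, 1023)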
-
-    def get_page_ranges(
-            self, container_name, blob_name, snapshot=None, start_range=None,
-            end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot to retrieve information
-            from.
-        :param int start_range:
-            Start of byte range to use for getting valid page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting valid page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the offset start up to
-            offset end.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of valid Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
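-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: each returned entry is a PageRange model with inclusive
-    # start/end offsets:
-    #
-    #     for page_range in service.get_page_ranges('mycontainer', 'myblob'):
-    #         print(page_range.start, page_range.end)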
-
-    def get_page_ranges_diff(
-            self, container_name, blob_name, previous_snapshot, snapshot=None,
-            start_range=None, end_range=None, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
-        '''
-        Returns only the pages that differ between a previous snapshot and
-        either a more recent snapshot or the current blob, including pages
-        that were cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str previous_snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a previous blob snapshot to be compared
-            against a more recent snapshot or the current blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that
-            specifies a more recent blob snapshot to be compared
-            against a previous snapshot (previous_snapshot).
-        :param int start_range:
-            Start of byte range to use for getting different page ranges.
-            If no end_range is given, all bytes after the start_range will be searched.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param int end_range:
-            End of byte range to use for getting different page ranges.
-            If end_range is given, start_range must be provided.
-            This range will return valid page ranges from the offset start up to
-            offset end.
-            Pages must be aligned with 512-byte boundaries: the start offset
-            must be a multiple of 512 and the end offset must be one less than
-            a multiple of 512. Examples of valid byte ranges are 0-511, 512-, etc.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A list of different Page Ranges for the Page Blob.
-        :rtype: list(:class:`~azure.storage.blob.models.PageRange`)
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('previous_snapshot', previous_snapshot)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'pagelist',
-            'snapshot': _to_str(snapshot),
-            'prevsnapshot': _to_str(previous_snapshot),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False,
-                align_to_page=True)
-
-        return self._perform_request(request, _convert_xml_to_page_ranges)
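-
-    # Editorial sketch with placeholder names, not part of the original
-    # module: diff the current blob against an earlier snapshot; cleared
-    # ranges are included, per the docstring above:
-    #
-    #     changed = service.get_page_ranges_diff(
-    #         'mycontainer', 'myblob', previous_snapshot=snapshot_time)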
-
-    def set_sequence_number(
-            self, container_name, blob_name, sequence_number_action, sequence_number=None,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Sets the blob sequence number.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information.
-        :param int sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('sequence_number_action', sequence_number_action)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-sequence-number-action': _to_str(sequence_number_action),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
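-
-    # Editorial sketch, not part of the original module: bump the sequence
-    # number so later writers can be fenced with the if_sequence_number_*
-    # conditions; 'increment' is one of the actions the docstring's
-    # SequenceNumberAction refers to (verify the literal against models.py):
-    #
-    #     service.set_sequence_number('mycontainer', 'myblob',
-    #                                 sequence_number_action='increment')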
-
-    def resize_blob(
-            self, container_name, blob_name, content_length,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-
-        '''
-        Resizes a page blob to the specified size. If the specified value is less
-        than the current size of the blob, then all pages above the specified value
-        are cleared.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of existing blob.
-        :param int content_length:
-            Size to resize blob to.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: ETag and last modified properties for the updated Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match),
-        }
-
-        return self._perform_request(request, _parse_page_properties)
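-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: grow the blob to 1 MB; shrinking instead clears all pages above
-    # the new length, as noted above (the size must stay 512-byte aligned):
-    #
-    #     service.resize_blob('mycontainer', 'myblob', 1024 * 1024)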
-
-    # ----Convenience APIs-----------------------------------------------------
-
-    def create_blob_from_path(
-            self, container_name, blob_name, file_path, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None, max_connections=2,
-            lease_id=None, if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file path, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param str file_path:
-            Path of the file to upload as the blob content.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('file_path', file_path)
-
-        count = path.getsize(file_path)
-        with open(file_path, 'rb') as stream:
-            return self.create_blob_from_stream(
-                container_name=container_name,
-                blob_name=blob_name,
-                stream=stream,
-                count=count,
-                content_settings=content_settings,
-                metadata=metadata,
-                validate_content=validate_content,
-                progress_callback=progress_callback,
-                max_connections=max_connections,
-                lease_id=lease_id,
-                if_modified_since=if_modified_since,
-                if_unmodified_since=if_unmodified_since,
-                if_match=if_match,
-                if_none_match=if_none_match,
-                timeout=timeout,
-                premium_page_blob_tier=premium_page_blob_tier)
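-
-    # Editorial usage sketch with placeholder names, not part of the original
-    # module: upload a local fixed-size file with automatic chunking; the
-    # file's size must be 512-byte aligned for a page blob:
-    #
-    #     service.create_blob_from_path('mycontainer', 'disk.vhd',
-    #                                   '/tmp/disk.vhd', max_connections=4)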
-
-    def create_blob_from_stream(
-            self, container_name, blob_name, stream, count, content_settings=None,
-            metadata=None, validate_content=False, progress_callback=None,
-            max_connections=2, lease_id=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            premium_page_blob_tier=None):
-        '''
-        Creates a new blob from a file/stream, or updates the content of an
-        existing blob, with automatic chunking and progress notifications.
-        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the blob content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a page
-            blob cannot be created if the count is unknown.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set the blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        if count < 0:
-            raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        if count % _PAGE_ALIGNMENT != 0:
-            raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
-
-        cek, iv, encryption_data = None, None, None
-        if self.key_encryption_key is not None:
-            cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
-
-        response = self._create_blob(
-            container_name=container_name,
-            blob_name=blob_name,
-            content_length=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            lease_id=lease_id,
-            premium_page_blob_tier=premium_page_blob_tier,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            encryption_data=encryption_data
-        )
-
-        if count == 0:
-            return response
-
-        # _upload_blob_chunks returns the block ids for block blobs, so resource_properties
-        # is passed in to collect the last_modified and etag for page and append blobs.
-        # Block blobs do not need this info because _put_block_list, called afterwards, returns it.
-        resource_properties = ResourceProperties()
-        _upload_blob_chunks(
-            blob_service=self,
-            container_name=container_name,
-            blob_name=blob_name,
-            blob_size=count,
-            block_size=self.MAX_PAGE_SIZE,
-            stream=stream,
-            max_connections=max_connections,
-            progress_callback=progress_callback,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            uploader_class=_PageBlobChunkUploader,
-            if_match=response.etag,
-            timeout=timeout,
-            content_encryption_key=cek,
-            initialization_vector=iv,
-            resource_properties=resource_properties
-        )
-
-        return resource_properties
-
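
A minimal usage sketch of the chunked page-blob upload above, for concreteness; the import path, credentials, and container/blob names are hypothetical, and the payload length must already be a multiple of the 512-byte page size:

    import io
    # assumed import path for one of the removed track1 multiapi versions
    from azure.multiapi.storage.v2018_11_09.blob import PageBlobService

    service = PageBlobService(account_name='myaccount', account_key='<base64 key>')
    data = b'\x00' * (4 * 512)  # page blobs require 512-byte alignment
    props = service.create_blob_from_stream(
        'mycontainer', 'myblob', io.BytesIO(data), count=len(data))
    print(props.etag, props.last_modified)
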
-    def create_blob_from_bytes(
-            self, container_name, blob_name, blob, index=0, count=None,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, lease_id=None,
-            if_modified_since=None, if_unmodified_since=None, if_match=None,
-            if_none_match=None, timeout=None, premium_page_blob_tier=None):
-        '''
-        Creates a new blob from an array of bytes, or updates the content
-        of an existing blob, with automatic chunking and progress
-        notifications. Empty chunks are skipped, while non-empty ones (even if only partially filled) are uploaded.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to create or update.
-        :param bytes blob:
-            Content of blob as an array of bytes.
-        :param int index:
-            Start index in the byte array.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.blob.models.ContentSettings content_settings:
-            ContentSettings object used to set blob properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each page of the blob. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            blob.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far, and total is the
-            size of the blob, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param str lease_id:
-            Required if the blob has an active lease.
-        :param datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :param str if_match:
-            An ETag value, or the wildcard character (*). Specify this header to perform
-            the operation only if the resource's ETag matches the value specified.
-        :param str if_none_match:
-            An ETag value, or the wildcard character (*). Specify this header
-            to perform the operation only if the resource's ETag does not match
-            the value specified. Specify the wildcard character (*) to perform
-            the operation only if the resource does not exist, and fail the
-            operation if it does exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :return: ETag and last modified properties for the Page Blob
-        :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('blob', blob)
-        _validate_type_bytes('blob', blob)
-
-        if index < 0:
-            raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(blob) - index
-
-        stream = BytesIO(blob)
-        stream.seek(index)
-
-        return self.create_blob_from_stream(
-            container_name=container_name,
-            blob_name=blob_name,
-            stream=stream,
-            count=count,
-            content_settings=content_settings,
-            metadata=metadata,
-            validate_content=validate_content,
-            lease_id=lease_id,
-            progress_callback=progress_callback,
-            max_connections=max_connections,
-            if_modified_since=if_modified_since,
-            if_unmodified_since=if_unmodified_since,
-            if_match=if_match,
-            if_none_match=if_none_match,
-            timeout=timeout,
-            premium_page_blob_tier=premium_page_blob_tier)
-
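
A similar hedged sketch for the bytes-based wrapper above; because count is forwarded to create_blob_from_stream, which enforces page alignment, an unaligned payload is padded up to the next 512-byte boundary here (reusing the hypothetical service object from the previous sketch):

    payload = b'hello page blob'
    padded = payload + b'\x00' * (-len(payload) % 512)  # round up to the page size
    service.create_blob_from_bytes('mycontainer', 'myblob', padded)
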
-    def set_premium_page_blob_tier(
-            self, container_name, blob_name, premium_page_blob_tier,
-            timeout=None):
-        '''
-        Sets the page blob tier on the blob. This API is only supported for page blobs on premium accounts.
-
-        :param str container_name:
-            Name of existing container.
-        :param str blob_name:
-            Name of blob to update.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        '''
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('premium_page_blob_tier', premium_page_blob_tier)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'tier',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-access-tier': _to_str(premium_page_blob_tier)
-        }
-
-        self._perform_request(request)
-
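
Since the tier change above is a single PUT with comp=tier and an x-ms-access-tier header, usage needs nothing beyond names; the tier value is illustrative and would normally come from the PremiumPageBlobTier constants in the models module:

    service.set_premium_page_blob_tier('mycontainer', 'myblob', 'P10')
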
-    def copy_blob(self, container_name, blob_name, copy_source,
-                  metadata=None,
-                  source_if_modified_since=None,
-                  source_if_unmodified_since=None,
-                  source_if_match=None, source_if_none_match=None,
-                  destination_if_modified_since=None,
-                  destination_if_unmodified_since=None,
-                  destination_if_match=None,
-                  destination_if_none_match=None,
-                  destination_lease_id=None,
-                  source_lease_id=None, timeout=None,
-                  premium_page_blob_tier=None):
-        '''
-        Copies a blob asynchronously. This operation returns a copy operation
-        properties object, including a copy ID you can use to check or abort the
-        copy operation. The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation must be a page blob. If the destination
-        blob already exists, it must be of the same blob type as the source blob.
-        Any existing destination blob will be overwritten.
-        The destination blob cannot be modified while a copy operation is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob's length, initially containing all zeroes. Then
-        the source page ranges are enumerated, and non-empty ranges are copied.
-
-        If the tier of the source blob is larger than the tier passed to this
-        copy operation, or if the size of the blob exceeds the capacity of the
-        tier passed, the operation will fail.
-
-        You can call get_blob_properties on the destination
-        blob to check the status of the copy operation. The final blob will be
-        committed when the copy completes.
-
-        :param str container_name:
-            Name of the destination container. The container must exist.
-        :param str blob_name:
-            Name of the destination blob. If the destination blob exists, it will
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :param datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :param ETag source_if_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the source blob only if its ETag matches the value
-            specified. If the ETag values do not match, the Blob service returns
-            status code 412 (Precondition Failed). This header cannot be specified
-            if the source is an Azure File.
-        :param ETag source_if_none_match:
-            An ETag value, or the wildcard character (*). Specify this conditional
-            header to copy the blob only if its ETag does not match the value
-            specified. If the values are identical, the Blob service returns status
-            code 412 (Precondition Failed). This header cannot be specified if the
-            source is an Azure File.
-        :param datetime destination_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :param datetime destination_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :param ETag destination_if_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            matches the ETag value for an existing destination blob. If the ETag for
-            the destination blob does not match the ETag specified for If-Match, the
-            Blob service returns status code 412 (Precondition Failed).
-        :param ETag destination_if_none_match:
-            An ETag value, or the wildcard character (*). Specify an ETag value for
-            this conditional header to copy the blob only if the specified ETag value
-            does not match the ETag value for the destination blob. Specify the wildcard
-            character (*) to perform the operation only if the destination blob does not
-            exist. If the specified condition isn't met, the Blob service returns status
-            code 412 (Precondition Failed).
-        :param str destination_lease_id:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :param str source_lease_id:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set on the destination blob. The tier correlates to
-            the size of the blob and number of allowed IOPS. This is only applicable to
-            page blobs on premium storage accounts.
-            If the tier of the source blob is larger than the tier passed to this
-            copy operation, or if the size of the blob exceeds the capacity of the
-            tier passed, the operation will fail.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.blob.models.CopyProperties`
-        '''
-        return self._copy_blob(container_name, blob_name, copy_source,
-                               metadata, premium_page_blob_tier,
-                               source_if_modified_since, source_if_unmodified_since,
-                               source_if_match, source_if_none_match,
-                               destination_if_modified_since,
-                               destination_if_unmodified_since,
-                               destination_if_match,
-                               destination_if_none_match,
-                               destination_lease_id,
-                               source_lease_id, timeout,
-                               False)
-
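
Because copy_blob above only starts a server-side copy, callers typically poll the destination blob's copy properties until the status leaves 'pending'; a hedged polling sketch with placeholder names and source URL:

    import time

    copy = service.copy_blob(
        'mycontainer', 'destblob',
        'https://myaccount.blob.core.windows.net/mycontainer/srcblob')
    while copy.status == 'pending':
        time.sleep(2)
        copy = service.get_blob_properties('mycontainer', 'destblob').properties.copy
    print(copy.status, copy.id)
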
-    # -----Helper methods-----------------------------------------------------
-
-    def _create_blob(
-            self, container_name, blob_name, content_length, content_settings=None,
-            sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None,
-            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
-            encryption_data=None):
-        '''
-        See create_blob for more details. This helper method
-        allows for encryption and other special behavior, since
-        the library can apply it safely here. Such behavior is
-        not exposed through the public version of this function.
-        :param str encryption_data:
-            The JSON-formatted encryption metadata to upload as part of the blob.
-            This should only be passed internally from other methods, and only
-            when uploading the entire blob contents immediately follows creation of the blob.
-        '''
-
-        _validate_not_none('container_name', container_name)
-        _validate_not_none('blob_name', blob_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-blob-type': _to_str(self.blob_type),
-            'x-ms-blob-content-length': _to_str(content_length),
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-blob-sequence-number': _to_str(sequence_number),
-            'x-ms-access-tier': _to_str(premium_page_blob_tier),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        if encryption_data is not None:
-            request.headers['x-ms-meta-encryptiondata'] = encryption_data
-
-        return self._perform_request(request, _parse_base_properties)
-
-    def _update_page(
-            self, container_name, blob_name, page, start_range, end_range,
-            validate_content=False, lease_id=None, if_sequence_number_lte=None,
-            if_sequence_number_lt=None, if_sequence_number_eq=None,
-            if_modified_since=None, if_unmodified_since=None,
-            if_match=None, if_none_match=None, timeout=None):
-        '''
-        See update_page for more details. This helper method
-        allows for encryption and other special behavior, since
-        the library can apply it safely here. Such behavior is
-        not exposed through the public version of this function.
-        '''
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(container_name, blob_name)
-        request.query = {
-            'comp': 'page',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-page-write': 'update',
-            'x-ms-lease-id': _to_str(lease_id),
-            'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
-            'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
-            'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
-            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
-            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
-            'If-Match': _to_str(if_match),
-            'If-None-Match': _to_str(if_none_match)
-        }
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            align_to_page=True)
-        request.body = _get_data_bytes_only('page', page)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        return self._perform_request(request, _parse_page_properties)
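
The range headers validated in this helper must be page aligned: start_range on a 512-byte boundary and end_range on the last byte of a page (inclusive). A sketch of a valid range for the public update_page wrapper around this helper, assuming the track1 method name and the hypothetical service object from the earlier sketches:

    page = b'\xff' * 512         # exactly one page of data
    start = 0                    # must be a multiple of 512
    end = start + len(page) - 1  # 511: inclusive end of the first page
    service.update_page('mycontainer', 'myblob', page, start, end)
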
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/blob/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/blob/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,275 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-    _QueryStringConstants,
-    _sign_string,
-)
-from ._constants import X_MS_VERSION
-from ..common._serialization import (
-    url_quote,
-)
-
-
-class BlobSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating blob and container access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key=None, user_delegation_key=None):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
-            Instead of an account key, the user could pass in a user delegation key.
-            A user delegation key can be obtained from the service by authenticating with an AAD identity;
-            this can be accomplished by calling get_user_delegation_key on any Blob service object.
-        '''
-        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-        self.user_delegation_key = user_delegation_key
-
-    def generate_blob(self, container_name, blob_name, snapshot=None, permission=None,
-                      expiry=None, start=None, id=None, ip=None, protocol=None,
-                      cache_control=None, content_disposition=None,
-                      content_encoding=None, content_language=None,
-                      content_type=None):
-        '''
-        Generates a shared access signature for the blob or one of its snapshots.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot for which permission is granted.
-        :param BlobPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared access
-            signature. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = container_name + '/' + blob_name
-
-        sas = _BlobSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('b' if snapshot is None else 'bs')
-        sas.add_timestamp(snapshot)
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, resource_path,
-                                   user_delegation_key=self.user_delegation_key)
-
-        return sas.get_token()
-
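
A hedged sketch of using this factory to mint a read-only blob SAS and append it to the blob URL; the account name and key are placeholders, and the import paths are assumed from this removed multiapi version:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2018_11_09.blob.models import BlobPermissions
    from azure.multiapi.storage.v2018_11_09.blob.sharedaccesssignature import (
        BlobSharedAccessSignature,
    )

    sas = BlobSharedAccessSignature('myaccount', account_key='<base64 key>')
    token = sas.generate_blob(
        'mycontainer', 'myblob',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    url = 'https://myaccount.blob.core.windows.net/mycontainer/myblob?' + token
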
-    def generate_container(self, container_name, permission=None, expiry=None,
-                           start=None, id=None, ip=None, protocol=None,
-                           cache_control=None, content_disposition=None,
-                           content_encoding=None, content_language=None,
-                           content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with this shared access
-            signature. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _BlobSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('c')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, container_name,
-                                   user_delegation_key=self.user_delegation_key)
-        return sas.get_token()
-
-
-class _BlobQueryStringConstants(_QueryStringConstants):
-    SIGNED_TIMESTAMP = 'snapshot'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-
-
-class _BlobSharedAccessHelper(_SharedAccessHelper):
-    def __init__(self):
-        super(_BlobSharedAccessHelper, self).__init__()
-
-    def add_timestamp(self, timestamp):
-        self._add_query(_BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp)
-
-    def get_value_to_append(self, query):
-        return_value = self.query_dict.get(query) or ''
-        return return_value + '\n'
-
-    def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None):
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/blob/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PERMISSION) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_START) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource)
-
-        if user_delegation_key is not None:
-            self._add_query(_BlobQueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid)
-            self._add_query(_BlobQueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid)
-            self._add_query(_BlobQueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start)
-            self._add_query(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry)
-            self._add_query(_BlobQueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service)
-            self._add_query(_BlobQueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version)
-
-            string_to_sign += \
-                (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_OID) +
-                 self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TID) +
-                 self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_START) +
-                 self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY) +
-                 self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_SERVICE) +
-                 self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_VERSION))
-        else:
-            string_to_sign += self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IDENTIFIER)
-
-        string_to_sign += \
-            (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IP) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PROTOCOL) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_VERSION) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_RESOURCE) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TIMESTAMP) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CACHE_CONTROL) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_ENCODING) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-             self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_BlobQueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key if user_delegation_key is None else user_delegation_key.value,
-                                     string_to_sign))
-
-    def get_token(self):
-        # a conscious decision was made to exclude the timestamp in the generated token
-        # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp
-        exclude = [_BlobQueryStringConstants.SIGNED_TIMESTAMP]
-        return '&'.join(['{0}={1}'.format(n, url_quote(v))
-                         for n, v in self.query_dict.items() if v is not None and n not in exclude])
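
To make the signing flow concrete, a self-contained sketch of the same HMAC-SHA256 computation over the newline-joined string-to-sign; the field order follows add_resource_signature above for the account-key path, and every value is a placeholder:

    import base64
    import hashlib
    import hmac

    def sign(account_key, string_to_sign):
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    string_to_sign = '\n'.join([
        'r',                                   # sp: permission
        '',                                    # st: start (omitted)
        '2025-01-01T00:00:00Z',                # se: expiry
        '/blob/myaccount/mycontainer/myblob',  # canonicalized resource
        '',                                    # si: stored access policy id
        '',                                    # sip: IP range
        'https',                               # spr: protocol
        '2018-11-09',                          # sv: service version
        'b',                                   # sr: resource type
        '',                                    # snapshot timestamp
        '', '', '', '', '',                    # five response-header overrides
    ])
    signature = sign('<base64 account key>', string_to_sign)
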
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,39 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    DEFAULT_X_MS_VERSION,
-)
-from .cloudstorageaccount import CloudStorageAccount
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    DeleteRetentionPolicy,
-    StaticWebsite,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-    ServiceStats,
-    GeoReplication,
-    LocationMode,
-    RetryContext,
-)
-from .retry import (
-    ExponentialRetry,
-    LinearRetry,
-    no_retry,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-from .tokencredential import TokenCredential
-from ._error import AzureSigningError
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_auth.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_auth.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_auth.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_auth.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,129 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._common_conversion import (
-    _sign_string,
-)
-from ._constants import (
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME
-)
-import sys
-if sys.version_info >= (3,):
-    from urllib.parse import parse_qsl
-else:
-    from urlparse import parse_qsl
-
-
-import logging
-logger = logging.getLogger(__name__)
-
-from ._error import (
-    AzureSigningError,
-    _wrap_exception,
-)
-
-
-class _StorageSharedKeyAuthentication(object):
-    def __init__(self, account_name, account_key, is_emulated=False):
-        self.account_name = account_name
-        self.account_key = account_key
-        self.is_emulated = is_emulated
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = request.path.split('?')[0]
-
-        # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME
-        # as this is how the emulator works
-        if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1:
-            # only replace the first instance
-            uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
-
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        try:
-            signature = _sign_string(self.account_key, string_to_sign)
-            auth_string = 'SharedKey ' + self.account_name + ':' + signature
-            request.headers['Authorization'] = auth_string
-        except Exception as ex:
-            # Wrap any error that occurred as signing error
-            # Doing so will clarify/locate the source of problem
-            raise _wrap_exception(ex, AzureSigningError)
-
-
-class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication):
-    def sign_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        logger.debug("String_to_sign=%s", string_to_sign)
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + value
-
-        return string_to_sign
-
-
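
As a concrete illustration of the canonicalization above, a GET of https://myaccount.blob.core.windows.net/mycontainer/myblob?comp=metadata carrying only x-ms-date and x-ms-version headers signs roughly the following string; the run of newlines stands for the eleven empty standard headers:

    string_to_sign = (
        'GET\n'
        + '\n' * 11                                    # eleven empty standard headers
        + 'x-ms-date:Mon, 17 Mar 2025 06:32:34 GMT\n'  # canonicalized x-ms headers
        + 'x-ms-version:2018-11-09\n'
        + '/myaccount/mycontainer/myblob'              # canonicalized resource
        + '\ncomp:metadata')                           # canonicalized query
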
-class _StorageNoAuthentication(object):
-    def sign_request(self, request):
-        pass
-
-
-class _StorageSASAuthentication(object):
-    def __init__(self, sas_token):
-        # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens
-        # doing so avoids double question marks when signing
-        if sas_token[0] == '?':
-            sas_token = sas_token[1:]
-
-        self.sas_qs = parse_qsl(sas_token)
-
-    def sign_request(self, request):
-        # if 'sig' is present, then the request has already been signed
-        # as is the case when performing retries
-        if 'sig' in request.query:
-            return
-
-        request.query.update(self.sas_qs)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_common_conversion.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_common_conversion.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_common_conversion.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_common_conversion.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,126 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-import sys
-from io import (SEEK_SET)
-
-from dateutil.tz import tzutc
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
-)
-from .models import (
-    _unicode_type,
-)
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-def _int_to_str(value):
-    return str(int(value)) if value is not None else None
-
-
-def _bool_to_str(value):
-    if value is None:
-        return None
-
-    if isinstance(value, bool):
-        if value:
-            return 'true'
-        else:
-            return 'false'
-
-    return str(value)
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _datetime_to_utc_string(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value is None:
-        return None
-
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-
-    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-
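
A quick illustration of the conversion above: naive datetimes are taken as UTC and formatted as RFC 1123 GMT strings, while timezone-aware ones are first shifted to UTC:

    from datetime import datetime
    _datetime_to_utc_string(datetime(2025, 3, 17, 6, 32, 34))
    # -> 'Mon, 17 Mar 2025 06:32:34 GMT'
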
-def _encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def _decode_base64_to_bytes(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def _decode_base64_to_text(data):
-    decoded_bytes = _decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def _sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = _decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, _unicode_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, _unicode_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = _encode_base64(digest)
-    return encoded_digest
-
-
-def _get_content_md5(data):
-    md5 = hashlib.md5()
-    if isinstance(data, bytes):
-        md5.update(data)
-    elif hasattr(data, 'read'):
-        pos = 0
-        try:
-            pos = data.tell()
-        except Exception:  # the stream may not support tell(); assume position 0
-            pass
-        for chunk in iter(lambda: data.read(4096), b""):
-            md5.update(chunk)
-        try:
-            data.seek(pos, SEEK_SET)
-        except (AttributeError, IOError):
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
-    else:
-        raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
-
-    return base64.b64encode(md5.digest()).decode('utf-8')
-
-
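
The helper above accepts either bytes or a seekable stream and restores the stream position afterwards, so both calls in this sketch yield the same base64-encoded MD5 digest:

    import io
    _get_content_md5(b'abc')              # -> 'kAFQmDzST7DWlj99KOF/cg=='
    _get_content_md5(io.BytesIO(b'abc'))  # same digest; position is restored
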
-def _lower(text):
-    return text.lower()
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_connection.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_connection.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_connection.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_connection.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,161 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from urllib.parse import urlparse
-else:
-    from urlparse import urlparse
-
-from ._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME,
-    DEV_ACCOUNT_KEY,
-    DEV_BLOB_HOST,
-    DEV_QUEUE_HOST,
-)
-from ._error import (
-    _ERROR_STORAGE_MISSING_INFO,
-)
-
-_EMULATOR_ENDPOINTS = {
-    'blob': DEV_BLOB_HOST,
-    'queue': DEV_QUEUE_HOST,
-    'file': '',
-}
-
-_CONNECTION_ENDPOINTS = {
-    'blob': 'BlobEndpoint',
-    'queue': 'QueueEndpoint',
-    'file': 'FileEndpoint',
-}
-
-_CONNECTION_ENDPOINTS_SECONDARY = {
-    'blob': 'BlobSecondaryEndpoint',
-    'queue': 'QueueSecondaryEndpoint',
-    'file': 'FileSecondaryEndpoint',
-}
-
-
-class _ServiceParameters(object):
-    def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
-                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, 
-                 custom_domain=None, custom_domain_secondary=None):
-
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.token_credential = token_credential
-        self.protocol = protocol or DEFAULT_PROTOCOL
-        self.is_emulated = is_emulated
-
-        if is_emulated:
-            self.account_name = DEV_ACCOUNT_NAME
-            self.protocol = 'http'
-
-            # Only set the account key if a sas_token is not present to allow sas to be used with the emulator
-            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
-            emulator_endpoint = _EMULATOR_ENDPOINTS[service] if custom_domain is None else custom_domain
-
-            self.primary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_NAME)
-            self.secondary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_SECONDARY_NAME)
-        else:
-            # Strip whitespace from the key
-            if self.account_key:
-                self.account_key = self.account_key.strip()
-
-            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
-
-            # Setup the primary endpoint
-            if custom_domain:
-                parsed_url = urlparse(custom_domain)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.primary_endpoint = parsed_url.netloc + path
-                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
-            else:
-                if not self.account_name:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
-
-            # Setup the secondary endpoint
-            if custom_domain_secondary:
-                if not custom_domain:
-                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)   
-
-                parsed_url = urlparse(custom_domain_secondary)
-
-                # Trim any trailing slashes from the path
-                path = parsed_url.path.rstrip('/')
-
-                self.secondary_endpoint = parsed_url.netloc + path
-            else:
-                if self.account_name:
-                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
-                else:
-                    self.secondary_endpoint = None
-
-    @staticmethod
-    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential=None,
-                               is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
-                               request_session=None, connection_string=None, socket_timeout=None):
-        if connection_string:
-            params = _ServiceParameters._from_connection_string(connection_string, service)
-        elif is_emulated:
-            params = _ServiceParameters(service, is_emulated=True, custom_domain=custom_domain)
-        elif account_name:
-            if protocol.lower() != 'https' and token_credential is not None:
-                raise ValueError("Token credential is only supported with HTTPS.")
-            params = _ServiceParameters(service,
-                                        account_name=account_name,
-                                        account_key=account_key,
-                                        sas_token=sas_token,
-                                        token_credential=token_credential,
-                                        is_emulated=is_emulated,
-                                        protocol=protocol,
-                                        endpoint_suffix=endpoint_suffix,
-                                        custom_domain=custom_domain)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        params.request_session = request_session
-        params.socket_timeout = socket_timeout
-        return params
-
-    @staticmethod
-    def _from_connection_string(connection_string, service):
-        # Split on ';' into key=value pairs (removing empties), then build a dict from the pairs
-        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
-
-        # Authentication
-        account_name = config.get('AccountName')
-        account_key = config.get('AccountKey')
-        sas_token = config.get('SharedAccessSignature')
-
-        # Emulator
-        is_emulated = config.get('UseDevelopmentStorage')
-
-        # Basic URL Configuration
-        protocol = config.get('DefaultEndpointsProtocol')
-        endpoint_suffix = config.get('EndpointSuffix')
-
-        # Custom URLs
-        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
-        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
-
-        return _ServiceParameters(service,
-                                  account_name=account_name,
-                                  account_key=account_key,
-                                  sas_token=sas_token,
-                                  is_emulated=is_emulated,
-                                  protocol=protocol,
-                                  endpoint_suffix=endpoint_suffix,
-                                  custom_domain=endpoint,
-                                  custom_domain_secondary=endpoint_secondary)
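
The connection-string handling in _from_connection_string above boils down to
a small amount of string splitting. A minimal, self-contained sketch of the
same parsing logic (the sample connection string and key below are
illustrative placeholders, not real credentials):

    def parse_connection_string(connection_string):
        # Split on ';' into key=value pairs, dropping empties; split each pair
        # on the first '=' only, since base64 account keys can end in '='.
        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
        return {
            'account_name': config.get('AccountName'),
            'account_key': config.get('AccountKey'),
            'sas_token': config.get('SharedAccessSignature'),
            'protocol': config.get('DefaultEndpointsProtocol'),
            'endpoint_suffix': config.get('EndpointSuffix'),
        }

    sample = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
              'AccountKey=SGVsbG8=;EndpointSuffix=core.windows.net')
    print(parse_connection_string(sample)['account_key'])  # prints 'SGVsbG8='
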
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_constants.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,51 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import platform
-import sys
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '2.0.0'
-
-# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
-# The first version (0.37.0) is the common package, and the second version (0.38.0) is the service package
-USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
-USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
-                                                          platform.python_version(), platform.system(),
-                                                          platform.release())
-
-# default values for common package, in case it is used directly
-DEFAULT_X_MS_VERSION = '2018-03-28'
-DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)
-
-# Live ServiceClient URLs
-SERVICE_HOST_BASE = 'core.windows.net'
-DEFAULT_PROTOCOL = 'https'
-
-# Development ServiceClient URLs
-DEV_BLOB_HOST = '127.0.0.1:10000'
-DEV_QUEUE_HOST = '127.0.0.1:10001'
-
-# Default credentials for Development Storage Service
-DEV_ACCOUNT_NAME = 'devstoreaccount1'
-DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary'
-DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
-
-# Socket timeout in seconds
-DEFAULT_SOCKET_TIMEOUT = 20
-
-# For Python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
-# The socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
-    # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
-    DEFAULT_SOCKET_TIMEOUT = (20, 2000)
-
-# Encryption constants
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
-
-_AUTHORIZATION_HEADER_NAME = 'Authorization'
-_COPY_SOURCE_HEADER_NAME = 'x-ms-copy-source'
-_REDACTED_VALUE = 'REDACTED'
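
Taken together with _ServiceParameters above, these constants imply the
endpoint shapes for a live account: the primary endpoint is
'<account>.<service>.<endpoint_suffix>' and the secondary adds '-secondary'
to the account name. A small sketch under those assumptions (the account name
is a placeholder):

    SERVICE_HOST_BASE = 'core.windows.net'

    def build_endpoints(account_name, service, endpoint_suffix=SERVICE_HOST_BASE):
        # Mirrors the non-emulator branch of _ServiceParameters.__init__.
        primary = '{}.{}.{}'.format(account_name, service, endpoint_suffix)
        secondary = '{}-secondary.{}.{}'.format(account_name, service, endpoint_suffix)
        return primary, secondary

    print(build_endpoints('myaccount', 'blob'))
    # ('myaccount.blob.core.windows.net', 'myaccount-secondary.blob.core.windows.net')
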
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,384 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-from ._common_conversion import _to_str
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    ServiceProperties,
-    Logging,
-    Metrics,
-    CorsRule,
-    AccessPolicy,
-    _dict,
-    GeoReplication,
-    ServiceStats,
-    DeleteRetentionPolicy,
-    StaticWebsite,
-)
-
-
-def _to_int(value):
-    return value if value is None else int(value)
-
-
-def _bool(value):
-    return value.lower() == 'true'
-
-
-def _to_upper_str(value):
-    return _to_str(value).upper() if value is not None else None
-
-
-def _get_download_size(start_range, end_range, resource_size):
-    if start_range is not None:
-        end_range = end_range if end_range else (resource_size if resource_size else None)
-        if end_range is not None:
-            return end_range - start_range
-        else:
-            return None
-    else:
-        return resource_size
-
-
-GET_PROPERTIES_ATTRIBUTE_MAP = {
-    'last-modified': (None, 'last_modified', parser.parse),
-    'etag': (None, 'etag', _to_str),
-    'x-ms-blob-type': (None, 'blob_type', _to_str),
-    'content-length': (None, 'content_length', _to_int),
-    'content-range': (None, 'content_range', _to_str),
-    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int),
-    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int),
-    'x-ms-blob-public-access': (None, 'public_access', _to_str),
-    'x-ms-access-tier': (None, 'blob_tier', _to_str),
-    'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
-    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
-    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
-    'x-ms-share-quota': (None, 'quota', _to_int),
-    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
-    'x-ms-creation-time': (None, 'creation_time', parser.parse),
-    'content-type': ('content_settings', 'content_type', _to_str),
-    'cache-control': ('content_settings', 'cache_control', _to_str),
-    'content-encoding': ('content_settings', 'content_encoding', _to_str),
-    'content-disposition': ('content_settings', 'content_disposition', _to_str),
-    'content-language': ('content_settings', 'content_language', _to_str),
-    'content-md5': ('content_settings', 'content_md5', _to_str),
-    'x-ms-lease-status': ('lease', 'status', _to_str),
-    'x-ms-lease-state': ('lease', 'state', _to_str),
-    'x-ms-lease-duration': ('lease', 'duration', _to_str),
-    'x-ms-copy-id': ('copy', 'id', _to_str),
-    'x-ms-copy-source': ('copy', 'source', _to_str),
-    'x-ms-copy-status': ('copy', 'status', _to_str),
-    'x-ms-copy-progress': ('copy', 'progress', _to_str),
-    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
-    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
-    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
-    'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool),
-    'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool),
-}
-
-
-def _parse_metadata(response):
-    '''
-    Extracts out resource metadata information.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    metadata = _dict()
-    for key, value in response.headers.items():
-        if key.lower().startswith('x-ms-meta-'):
-            metadata[key[10:]] = _to_str(value)
-
-    return metadata
-
-
-def _parse_properties(response, result_class):
-    '''
-    Extracts out resource properties and metadata information.
-    Ignores the standard http headers.
-    '''
-
-    if response is None or response.headers is None:
-        return None
-
-    props = result_class()
-    for key, value in response.headers.items():
-        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
-        if info:
-            if info[0] is None:
-                setattr(props, info[1], info[2](value))
-            else:
-                attr = getattr(props, info[0])
-                setattr(attr, info[1], info[2](value))
-
-    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
-        props.blob_tier = _to_upper_str(props.blob_tier)
-    return props
-
-
-def _parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def _convert_xml_to_signed_identifiers(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <SignedIdentifiers>
-      <SignedIdentifier>
-        <Id>unique-value</Id>
-        <AccessPolicy>
-          <Start>start-time</Start>
-          <Expiry>expiry-time</Expiry>
-          <Permission>abbreviated-permission-list</Permission>
-        </AccessPolicy>
-      </SignedIdentifier>
-    </SignedIdentifiers>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    list_element = ETree.fromstring(response.body)
-    signed_identifiers = _dict()
-
-    for signed_identifier_element in list_element.findall('SignedIdentifier'):
-        # Id element
-        id = signed_identifier_element.find('Id').text
-
-        # Access policy element
-        access_policy = AccessPolicy()
-        access_policy_element = signed_identifier_element.find('AccessPolicy')
-        if access_policy_element is not None:
-            start_element = access_policy_element.find('Start')
-            if start_element is not None:
-                access_policy.start = parser.parse(start_element.text)
-
-            expiry_element = access_policy_element.find('Expiry')
-            if expiry_element is not None:
-                access_policy.expiry = parser.parse(expiry_element.text)
-
-            access_policy.permission = access_policy_element.findtext('Permission')
-
-        signed_identifiers[id] = access_policy
-
-    return signed_identifiers
-
-
-def _convert_xml_to_service_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceStats>
-      <GeoReplication>      
-          <Status>live|bootstrap|unavailable</Status>
-          <LastSyncTime>sync-time|<empty></LastSyncTime>
-      </GeoReplication>
-    </StorageServiceStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_stats_element = ETree.fromstring(response.body)
-
-    geo_replication_element = service_stats_element.find('GeoReplication')
-
-    geo_replication = GeoReplication()
-    geo_replication.status = geo_replication_element.find('Status').text
-    last_sync_time = geo_replication_element.find('LastSyncTime').text
-    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None
-
-    service_stats = ServiceStats()
-    service_stats.geo_replication = geo_replication
-    return service_stats
-
-
-def _convert_xml_to_service_properties(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-        <DeleteRetentionPolicy>
-             <Enabled>true|false</Enabled>
-             <Days>number-of-days</Days>
-        </DeleteRetentionPolicy>
-        <StaticWebsite>
-            <Enabled>true|false</Enabled>
-            <IndexDocument></IndexDocument>
-            <ErrorDocument404Path></ErrorDocument404Path>
-        </StaticWebsite>
-    </StorageServiceProperties>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    service_properties_element = ETree.fromstring(response.body)
-    service_properties = ServiceProperties()
-
-    # Logging
-    logging = service_properties_element.find('Logging')
-    if logging is not None:
-        service_properties.logging = Logging()
-        service_properties.logging.version = logging.find('Version').text
-        service_properties.logging.delete = _bool(logging.find('Delete').text)
-        service_properties.logging.read = _bool(logging.find('Read').text)
-        service_properties.logging.write = _bool(logging.find('Write').text)
-
-        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
-                                         service_properties.logging.retention_policy)
-    # HourMetrics
-    hour_metrics_element = service_properties_element.find('HourMetrics')
-    if hour_metrics_element is not None:
-        service_properties.hour_metrics = Metrics()
-        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
-
-    # MinuteMetrics
-    minute_metrics_element = service_properties_element.find('MinuteMetrics')
-    if minute_metrics_element is not None:
-        service_properties.minute_metrics = Metrics()
-        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
-
-    # CORS
-    cors = service_properties_element.find('Cors')
-    if cors is not None:
-        service_properties.cors = list()
-        for rule in cors.findall('CorsRule'):
-            allowed_origins = rule.find('AllowedOrigins').text.split(',')
-
-            allowed_methods = rule.find('AllowedMethods').text.split(',')
-
-            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
-
-            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
-
-            exposed_headers = rule.find('ExposedHeaders').text
-            if exposed_headers is not None:
-                cors_rule.exposed_headers = exposed_headers.split(',')
-
-            allowed_headers = rule.find('AllowedHeaders').text
-            if allowed_headers is not None:
-                cors_rule.allowed_headers = allowed_headers.split(',')
-
-            service_properties.cors.append(cors_rule)
-
-    # Target version
-    target_version = service_properties_element.find('DefaultServiceVersion')
-    if target_version is not None:
-        service_properties.target_version = target_version.text
-
-    # DeleteRetentionPolicy
-    delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
-    if delete_retention_policy_element is not None:
-        service_properties.delete_retention_policy = DeleteRetentionPolicy()
-        policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
-        service_properties.delete_retention_policy.enabled = policy_enabled
-
-        if policy_enabled:
-            service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)
-
-    # StaticWebsite
-    static_website_element = service_properties_element.find('StaticWebsite')
-    if static_website_element is not None:
-        service_properties.static_website = StaticWebsite()
-        service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text)
-
-        index_document_element = static_website_element.find('IndexDocument')
-        if index_document_element is not None:
-            service_properties.static_website.index_document = index_document_element.text
-
-        error_document_element = static_website_element.find('ErrorDocument404Path')
-        if error_document_element is not None:
-            service_properties.static_website.error_document_404_path = error_document_element.text
-
-    return service_properties
-
-
-def _convert_xml_to_metrics(xml, metrics):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    metrics.version = xml.find('Version').text
-
-    # Enabled
-    metrics.enabled = _bool(xml.find('Enabled').text)
-
-    # IncludeAPIs
-    include_apis_element = xml.find('IncludeAPIs')
-    if include_apis_element is not None:
-        metrics.include_apis = _bool(include_apis_element.text)
-
-    # RetentionPolicy
-    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)
-
-
-def _convert_xml_to_retention_policy(xml, retention_policy):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    retention_policy.enabled = _bool(xml.find('Enabled').text)
-
-    # Days
-    days_element = xml.find('Days')
-    if days_element is not None:
-        retention_policy.days = int(days_element.text)
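
Two of the helpers above are easy to exercise in isolation. A hedged,
standalone sketch of the same logic, outside the module:

    def parse_metadata(headers):
        # 'x-ms-meta-<name>' headers carry user metadata; strip the
        # 10-character prefix to recover the key, as _parse_metadata does.
        return {k[10:]: v for k, v in headers.items()
                if k.lower().startswith('x-ms-meta-')}

    def parse_length_from_content_range(content_range):
        # 'bytes 1-3/65537' -> take the part after the space, then after '/'.
        if content_range is None:
            return None
        return int(content_range.split(' ', 1)[1].split('/', 1)[1])

    assert parse_metadata({'x-ms-meta-author': 'alice', 'etag': '"0x1"'}) == {'author': 'alice'}
    assert parse_length_from_content_range('bytes 1-3/65537') == 65537
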
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_encryption.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,233 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-
-from ._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-    __version__,
-)
-from ._error import (
-    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
-    _validate_not_none,
-    _validate_encryption_protocol_version,
-    _validate_key_encryption_key_unwrap,
-    _validate_kek_id,
-)
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier, 
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-    
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    except KeyError:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_unwrap(key_encryption_key)
-    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())
-
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
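
The cipher construction in _generate_AES_CBC_cipher uses the `cryptography`
primitives imported at the top of this module. A self-contained round-trip
sketch of that construction (the random key, IV, and 16-byte message are
illustrative; real callers derive the CEK and IV from the encryption
metadata):

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC

    cek = os.urandom(32)  # content encryption key (AES-256)
    iv = os.urandom(16)   # CBC initialization vector (one AES block)
    cipher = Cipher(AES(cek), CBC(iv), default_backend())

    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(b'16-byte-aligned!') + encryptor.finalize()

    decryptor = cipher.decryptor()
    assert decryptor.update(ciphertext) + decryptor.finalize() == b'16-byte-aligned!'
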
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_error.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,218 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from sys import version_info
-
-if version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_str(value):
-    return _str(value) if value is not None else None
-
-
-from azure.common import (
-    AzureHttpError,
-    AzureConflictHttpError,
-    AzureMissingResourceHttpError,
-    AzureException,
-)
-from ._constants import (
-    _ENCRYPTION_PROTOCOL_V1,
-)
-
-_ERROR_CONFLICT = 'Conflict ({0})'
-_ERROR_NOT_FOUND = 'Not found ({0})'
-_ERROR_UNKNOWN = 'Unknown error ({0})'
-_ERROR_STORAGE_MISSING_INFO = \
-    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
-_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
-    'The emulator does not support the file service.'
-_ERROR_ACCESS_POLICY = \
-    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
-    'instance'
-_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
-_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
-_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
-_ERROR_VALUE_NONE = '{0} should not be None.'
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
-_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
-_ERROR_START_END_NEEDED_FOR_MD5 = \
-    'Both end_range and start_range need to be specified ' + \
-    'for getting content MD5.'
-_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
-    'Getting content MD5 for a range greater than 4MB ' + \
-    'is not supported.'
-_ERROR_MD5_MISMATCH = \
-    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
-_ERROR_TOO_MANY_ACCESS_POLICIES = \
-    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
-    'Encryption version is not supported.'
-_ERROR_DECRYPTION_FAILURE = \
-    'Decryption failed'
-_ERROR_ENCRYPTION_REQUIRED = \
-    'Encryption required but no key was provided.'
-_ERROR_DECRYPTION_REQUIRED = \
-    'Decryption required but neither key nor resolver was provided.' + \
-    ' If you do not want to decrypt, please do not set the require encryption flag.'
-_ERROR_INVALID_KID = \
-    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
-_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
-    'Specified encryption algorithm is not supported.'
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
-                                           ' for this method.'
-_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
-_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
-                            'Data was either not encrypted or metadata has been lost.'
-
-
-def _dont_fail_on_exist(error):
-    ''' Don't throw an exception if the resource already exists.
-    This is called by create_* APIs with fail_on_exist=False'''
-    if isinstance(error, AzureConflictHttpError):
-        return False
-    else:
-        raise error
-
-
-def _dont_fail_not_exist(error):
-    ''' Don't throw an exception if the resource doesn't exist.
-    This is called by delete_* APIs with fail_not_exist=False'''
-    if isinstance(error, AzureMissingResourceHttpError):
-        return False
-    else:
-        raise error
-
-
-def _http_error_handler(http_error):
-    ''' Simple error handler for azure.'''
-    message = str(http_error)
-    error_code = None
-
-    if 'x-ms-error-code' in http_error.respheader:
-        error_code = http_error.respheader['x-ms-error-code']
-        message += ' ErrorCode: ' + error_code
-
-    if http_error.respbody is not None:
-        message += '\n' + http_error.respbody.decode('utf-8-sig')
-
-    ex = AzureHttpError(message, http_error.status)
-    ex.error_code = error_code
-
-    raise ex
-
-
-def _validate_type_bytes(param_name, param):
-    if not isinstance(param, bytes):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _validate_type_bytes_or_stream(param_name, param):
-    if not (isinstance(param, bytes) or hasattr(param, 'read')):
-        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
-
-
-def _validate_content_match(server_md5, computed_md5):
-    if server_md5 != computed_md5:
-        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
-
-
-def _validate_access_policies(identifiers):
-    if identifiers and len(identifiers) > 5:
-        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-def _validate_key_encryption_key_unwrap(kek):
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-
-
-def _validate_encryption_required(require_encryption, kek):
-    if require_encryption and (kek is None):
-        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
-
-
-def _validate_decryption_required(require_encryption, kek, resolver):
-    if (require_encryption and (kek is None) and
-            (resolver is None)):
-        raise ValueError(_ERROR_DECRYPTION_REQUIRED)
-
-
-def _validate_encryption_protocol_version(encryption_protocol):
-    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
-
-
-def _validate_kek_id(kid, resolved_id):
-    if not (kid == resolved_id):
-        raise ValueError(_ERROR_INVALID_KID)
-
-
-def _validate_encryption_unsupported(require_encryption, key_encryption_key):
-    if require_encryption or (key_encryption_key is not None):
-        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-
-def _validate_user_delegation_key(user_delegation_key):
-    _validate_not_none('user_delegation_key.signed_oid', user_delegation_key.signed_oid)
-    _validate_not_none('user_delegation_key.signed_tid', user_delegation_key.signed_tid)
-    _validate_not_none('user_delegation_key.signed_start', user_delegation_key.signed_start)
-    _validate_not_none('user_delegation_key.signed_expiry', user_delegation_key.signed_expiry)
-    _validate_not_none('user_delegation_key.signed_version', user_delegation_key.signed_version)
-    _validate_not_none('user_delegation_key.signed_service', user_delegation_key.signed_service)
-    _validate_not_none('user_delegation_key.value', user_delegation_key.value)
-
-
-# wraps a given exception with the desired exception type
-def _wrap_exception(ex, desired_type):
-    msg = ""
-    if len(ex.args) > 0:
-        msg = ex.args[0]
-    if version_info >= (3,):
-        # Automatic chaining in Python 3 means we keep the trace
-        return desired_type(msg)
-    else:
-        # There isn't a good solution in 2 for keeping the stack trace
-        # in general, or that will not result in an error in 3
-        # However, we can keep the previous error type and message
-        # TODO: In the future we will log the trace
-        return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
-
-
-class AzureSigningError(AzureException):
-    """
-    Represents a fatal error when attempting to sign a request.
-    In general, the cause of this exception is user error. For example, the given account key is not valid.
-    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
-    """
-    pass
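
The key-encryption-key validators above enforce a duck-typed contract rather
than a base class: any object exposing the callable methods they probe for is
accepted. A minimal sketch of an object satisfying the unwrap contract
(IdentityKek is a hypothetical, deliberately insecure stand-in for
illustration only):

    class IdentityKek(object):
        def unwrap_key(self, wrapped_key, algorithm):
            return wrapped_key  # a real KEK would decrypt the wrapped CEK

        def get_kid(self):
            return 'local-key-1'

    def validate_kek_unwrap(kek):
        # Same shape as _validate_key_encryption_key_unwrap above.
        for method in ('get_kid', 'unwrap_key'):
            if not callable(getattr(kek, method, None)):
                raise AttributeError('key encryption key must define ' + method)

    validate_kek_unwrap(IdentityKek())  # passes; None or a plain object would not
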
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_http/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_http/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_http/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_http/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,74 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-
-class HTTPError(Exception):
-    '''
-    Represents an HTTP Exception when response status code >= 300.
-
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar dict respheader:
-        the returned headers
-    :ivar bytes respbody:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, respheader, respbody):
-        self.status = status
-        self.respheader = respheader
-        self.respbody = respbody
-        Exception.__init__(self, message)
-
-
-class HTTPResponse(object):
-    '''
-    Represents a response from an HTTP request.
-    
-    :ivar int status:
-        the status code of the response
-    :ivar str message:
-        the message
-    :ivar dict headers:
-        the returned headers
-    :ivar bytes body:
-        the body of the response
-    '''
-
-    def __init__(self, status, message, headers, body):
-        self.status = status
-        self.message = message
-        self.headers = headers
-        self.body = body
-
-
-class HTTPRequest(object):
-    '''
-    Represents an HTTP Request.
-
-    :ivar str host:
-        the host name to connect to
-    :ivar str method:
-        the method to use to connect (string such as GET, POST, PUT, etc.)
-    :ivar str path:
-        the uri fragment
-    :ivar dict query:
-        query parameters
-    :ivar dict headers:
-        header values
-    :ivar bytes body:
-        the body of the request.
-    '''
-
-    def __init__(self):
-        self.host = ''
-        self.method = ''
-        self.path = ''
-        self.query = {}  # list of (name, value)
-        self.headers = {}  # list of (header name, header value)
-        self.body = ''
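
These are deliberately plain containers: callers fill in the fields and hand
the request to the HTTP client. A sketch of typical field values, redefining
the container locally so the snippet stands alone (host, path, and query
values are illustrative):

    class HTTPRequest(object):
        # Same plain container as above.
        def __init__(self):
            self.host = ''
            self.method = ''
            self.path = ''
            self.query = {}
            self.headers = {}
            self.body = ''

    request = HTTPRequest()
    request.host = 'myaccount.blob.core.windows.net'
    request.method = 'GET'
    request.path = '/mycontainer'
    request.query = {'restype': 'container', 'comp': 'list'}
    request.headers = {'x-ms-version': '2018-03-28'}
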
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_http/httpclient.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_http/httpclient.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_http/httpclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_http/httpclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-from . import HTTPResponse
-from .._serialization import _get_data_bytes_or_stream_only
-logger = logging.getLogger(__name__)
-
-
-class _HTTPClient(object):
-    '''
-    Takes the request and sends it to cloud service and returns the response.
-    '''
-
-    def __init__(self, protocol=None, session=None, timeout=None):
-        '''
-        :param str protocol:
-            http or https.
-        :param requests.Session session:
-            session object created with requests library (or compatible).
-        :param int timeout:
-            timeout for the http request, in seconds.
-        '''
-        self.protocol = protocol
-        self.session = session
-        self.timeout = timeout
-
-        # By default, requests adds an Accept:*/* and Accept-Encoding to the session, 
-        # which causes issues with some Azure REST APIs. Removing these here gives us 
-        # the flexibility to add it back on a case by case basis.
-        if 'Accept' in self.session.headers:
-            del self.session.headers['Accept']
-
-        if 'Accept-Encoding' in self.session.headers:
-            del self.session.headers['Accept-Encoding']
-
-        self.proxies = None
-
-    def set_proxy(self, host, port, user, password):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        Note that we set the proxies directly on the request later on, rather
-        than on the session object, because requests has a bug where a session
-        proxy is ignored in favor of an environment proxy. As a result, proxy
-        auth will not work unless the proxies are passed directly when making
-        the request, which overrides both.
-
-        :param str host:
-            Address of the proxy. Ex: '192.168.0.100'
-        :param int port:
-            Port of the proxy. Ex: 6000
-        :param str user:
-            User for proxy authorization.
-        :param str password:
-            Password for proxy authorization.
-        '''
-        if user and password:
-            proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
-        else:
-            proxy_string = '{}:{}'.format(host, port)
-
-        self.proxies = {'http': 'http://{}'.format(proxy_string),
-                        'https': 'https://{}'.format(proxy_string)}
-
-    def perform_request(self, request):
-        '''
-        Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If 
-        the response code indicates an error, raise an HTTPError.    
-        
-        :param HTTPRequest request:
-            The request to serialize and send.
-        :return: An HTTPResponse containing the parsed HTTP response.
-        :rtype: :class:`~azure.storage.common._http.HTTPResponse`
-        '''
-        # Verify the body is in bytes or either a file-like/stream object
-        if request.body:
-            request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-
-        # Construct the URI
-        uri = self.protocol.lower() + '://' + request.host + request.path
-
-        # Send the request
-        response = self.session.request(request.method,
-                                        uri,
-                                        params=request.query,
-                                        headers=request.headers,
-                                        data=request.body or None,
-                                        timeout=self.timeout,
-                                        proxies=self.proxies)
-
-        # Parse the response
-        status = int(response.status_code)
-        response_headers = {}
-        for key, name in response.headers.items():
-            # Preserve the case of metadata
-            if key.lower().startswith('x-ms-meta-'):
-                response_headers[key] = name
-            else:
-                response_headers[key.lower()] = name
-
-        wrap = HTTPResponse(status, response.reason, response_headers, response.content)
-        response.close()
-
-        return wrap
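
One detail of perform_request worth noting is its header normalization:
header names are lower-cased so later lookups are case-insensitive, except
for 'x-ms-meta-*' headers, whose case is preserved because metadata keys are
user-defined. A standalone sketch of that loop:

    def normalize_headers(raw_headers):
        normalized = {}
        for key, value in raw_headers.items():
            if key.lower().startswith('x-ms-meta-'):
                normalized[key] = value  # preserve user-defined metadata case
            else:
                normalized[key.lower()] = value
        return normalized

    print(normalize_headers({'ETag': '"0x1"', 'x-ms-meta-Owner': 'alice'}))
    # {'etag': '"0x1"', 'x-ms-meta-Owner': 'alice'}
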
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_serialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,371 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-import uuid
-from datetime import date
-from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation)
-from os import fstat
-from time import time
-from wsgiref.handlers import format_date_time
-
-from dateutil.tz import tzutc
-
-if sys.version_info >= (3,):
-    from urllib.parse import quote as url_quote
-else:
-    from urllib2 import quote as url_quote
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ._error import (
-    _ERROR_VALUE_SHOULD_BE_BYTES,
-    _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
-    _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM
-)
-from .models import (
-    _unicode_type,
-)
-from ._common_conversion import (
-    _str,
-)
-
-
-def _to_utc_datetime(value):
-    # Azure expects the date value passed in to be UTC.
-    # Azure will always return values as UTC.
-    # If a date is passed in without timezone info, it is assumed to be UTC.
-    if value.tzinfo:
-        value = value.astimezone(tzutc())
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
-
-
-def _update_request(request, x_ms_version, user_agent_string):
-    # Verify body
-    if request.body:
-        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
-        length = _len_plus(request.body)
-
-        # The only plausible scenario for this case is a stream object that is not seekable.
-        if length is None:
-            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)
-
-        # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
-        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
-            request.headers['Content-Length'] = str(length)
-
-    # Append additional headers based on the service
-    request.headers['x-ms-version'] = x_ms_version
-    request.headers['User-Agent'] = user_agent_string
-    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())
-
-    # If the host has a path component (ex local storage), move it
-    path = request.host.split('/', 1)
-    if len(path) == 2:
-        request.host = path[0]
-        request.path = '/{}{}'.format(path[1], request.path)
-
-    # Encode and optionally add local storage prefix to path
-    request.path = url_quote(request.path, '/()$=\',~')
-
-
-def _add_metadata_headers(metadata, request):
-    if metadata:
-        if not request.headers:
-            request.headers = {}
-        for name, value in metadata.items():
-            request.headers['x-ms-meta-' + name] = value
-
-
-def _add_date_header(request):
-    current_time = format_date_time(time())
-    request.headers['x-ms-date'] = current_time
-
-
-def _get_data_bytes_only(param_name, param_value):
-    '''Validates the request body passed in and converts it to bytes
-    if our policy allows it.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
-
-
-def _get_data_bytes_or_stream_only(param_name, param_value):
-    '''Validates the request body passed in is a stream/file-like or bytes
-    object.'''
-    if param_value is None:
-        return b''
-
-    if isinstance(param_value, bytes) or hasattr(param_value, 'read'):
-        return param_value
-
-    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
-
-
-def _get_request_body(request_body):
-    '''Converts an object into a request body. If it's None we'll return an
-    empty byte string; bytes and file-like objects pass through unchanged;
-    text is encoded as UTF-8; anything else is stringified and encoded.'''
-    if request_body is None:
-        return b''
-
-    if isinstance(request_body, bytes) or isinstance(request_body, IOBase):
-        return request_body
-
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    request_body = str(request_body)
-    if isinstance(request_body, _unicode_type):
-        return request_body.encode('utf-8')
-
-    return request_body
-
-
-def _convert_signed_identifiers_to_xml(signed_identifiers):
-    if signed_identifiers is None:
-        return ''
-
-    sis = ETree.Element('SignedIdentifiers')
-    for id, access_policy in signed_identifiers.items():
-        # Root signed identifiers element
-        si = ETree.SubElement(sis, 'SignedIdentifier')
-
-        # Id element
-        ETree.SubElement(si, 'Id').text = id
-
-        # Access policy element
-        policy = ETree.SubElement(si, 'AccessPolicy')
-
-        if access_policy.start:
-            start = access_policy.start
-            if isinstance(access_policy.start, date):
-                start = _to_utc_datetime(start)
-            ETree.SubElement(policy, 'Start').text = start
-
-        if access_policy.expiry:
-            expiry = access_policy.expiry
-            if isinstance(access_policy.expiry, date):
-                expiry = _to_utc_datetime(expiry)
-            ETree.SubElement(policy, 'Expiry').text = expiry
-
-        if access_policy.permission:
-            ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission)
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-    except:
-        raise
-    finally:
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
-                                       cors, target_version=None, delete_retention_policy=None, static_website=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <StorageServiceProperties>
-        <Logging>
-            <Version>version-number</Version>
-            <Delete>true|false</Delete>
-            <Read>true|false</Read>
-            <Write>true|false</Write>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </Logging>
-        <HourMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </HourMetrics>
-        <MinuteMetrics>
-            <Version>version-number</Version>
-            <Enabled>true|false</Enabled>
-            <IncludeAPIs>true|false</IncludeAPIs>
-            <RetentionPolicy>
-                <Enabled>true|false</Enabled>
-                <Days>number-of-days</Days>
-            </RetentionPolicy>
-        </MinuteMetrics>
-        <Cors>
-            <CorsRule>
-                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
-                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
-                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
-                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
-                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
-            </CorsRule>
-        </Cors>
-        <DeleteRetentionPolicy>
-            <Enabled>true|false</Enabled>
-            <Days>number-of-days</Days>
-        </DeleteRetentionPolicy>
-        <StaticWebsite>
-            <Enabled>true|false</Enabled>
-            <IndexDocument></IndexDocument>
-            <ErrorDocument404Path></ErrorDocument404Path>
-        </StaticWebsite>
-    </StorageServiceProperties>
-    '''
-    service_properties_element = ETree.Element('StorageServiceProperties')
-
-    # Logging
-    if logging:
-        logging_element = ETree.SubElement(service_properties_element, 'Logging')
-        ETree.SubElement(logging_element, 'Version').text = logging.version
-        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
-        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
-        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
-
-        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
-        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)
-
-    # HourMetrics
-    if hour_metrics:
-        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
-        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)
-
-    # MinuteMetrics
-    if minute_metrics:
-        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
-        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)
-
-    # CORS
-    # Make sure to still serialize empty list
-    if cors is not None:
-        cors_element = ETree.SubElement(service_properties_element, 'Cors')
-        for rule in cors:
-            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
-            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
-            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
-            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
-            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
-            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
-
-    # Target version
-    if target_version:
-        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
-
-    # DeleteRetentionPolicy
-    if delete_retention_policy:
-        policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
-        ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
-
-        if delete_retention_policy.enabled:
-            ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
-
-    # StaticWebsite
-    if static_website:
-        static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
-        ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
-
-        if static_website.enabled:
-
-            if static_website.index_document is not None:
-                ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
-
-            if static_website.error_document_404_path is not None:
-                ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
-                    str(static_website.error_document_404_path)
-
-    # Add xml declaration and serialize
-    stream = BytesIO()
-    try:
-        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
-                                                            method='xml')
-    finally:
-        # Even if write() raises, capture what was serialized and free the buffer.
-        output = stream.getvalue()
-        stream.close()
-
-    return output
-
-
-def _convert_metrics_to_xml(metrics, root):
-    '''
-    <Version>version-number</Version>
-    <Enabled>true|false</Enabled>
-    <IncludeAPIs>true|false</IncludeAPIs>
-    <RetentionPolicy>
-        <Enabled>true|false</Enabled>
-        <Days>number-of-days</Days>
-    </RetentionPolicy>
-    '''
-    # Version
-    ETree.SubElement(root, 'Version').text = metrics.version
-
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
-
-    # IncludeAPIs
-    if metrics.enabled and metrics.include_apis is not None:
-        ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
-
-    # RetentionPolicy
-    retention_element = ETree.SubElement(root, 'RetentionPolicy')
-    _convert_retention_policy_to_xml(metrics.retention_policy, retention_element)
-
-
-def _convert_retention_policy_to_xml(retention_policy, root):
-    '''
-    <Enabled>true|false</Enabled>
-    <Days>number-of-days</Days>
-    '''
-    # Enabled
-    ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled)
-
-    # Days
-    if retention_policy.enabled and retention_policy.days:
-        ETree.SubElement(root, 'Days').text = str(retention_policy.days)
-
-
-def _len_plus(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except TypeError:
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            return fstat(fileno).st_size
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
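
For reference while this track1 helper disappears, here is a minimal standalone sketch of the length-probing chain _len_plus implements: try len(), then the file descriptor via fstat, then seek/tell. The name probe_length is illustrative, not package API:

    from io import SEEK_END, SEEK_SET, BytesIO, UnsupportedOperation
    from os import fstat

    def probe_length(data):
        # 1) Most inputs (bytes, bytearray, str) implement __len__.
        try:
            return len(data)
        except TypeError:
            pass
        # 2) Real files expose a descriptor; fstat gives the on-disk size.
        try:
            return fstat(data.fileno()).st_size
        except (AttributeError, UnsupportedOperation):
            pass
        # 3) Seekable streams: measure the bytes remaining from the current position.
        try:
            current = data.tell()
            data.seek(0, SEEK_END)
            length = data.tell() - current
            data.seek(current, SEEK_SET)
            return length
        except (AttributeError, UnsupportedOperation):
            return None

    assert probe_length(b'abc') == 3
    assert probe_length(BytesIO(b'abcd')) == 4
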
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/cloudstorageaccount.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/cloudstorageaccount.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/cloudstorageaccount.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/cloudstorageaccount.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,198 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-# Note that we import BlobService/QueueService/FileService on demand
-# because this module is imported by azure/storage/__init__
-# ie. we don't want 'import azure.storage' to trigger an automatic import
-# of blob/queue/file packages.
-
-from ._error import _validate_not_none
-from .models import (
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-)
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
-
-
-class CloudStorageAccount(object):
-    """
-    Provides a factory for creating the blob, queue, and file services
-    with a common account name and account key or sas token.  Users can either 
-    use the factory or can construct the appropriate service directly.
-    """
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 is_emulated=None, endpoint_suffix=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless is_emulated is used.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will
-            override all other parameters.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use a sovereign cloud.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.sas_token = sas_token
-        self.is_emulated = is_emulated
-        self.endpoint_suffix = endpoint_suffix
-
-    def create_block_blob_service(self):
-        '''
-        Creates a BlockBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
-        '''
-        try:
-            from ..blob.blockblobservice import BlockBlobService
-            return BlockBlobService(self.account_name, self.account_key,
-                                    sas_token=self.sas_token,
-                                    is_emulated=self.is_emulated,
-                                    endpoint_suffix=self.endpoint_suffix)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_page_blob_service(self):
-        '''
-        Creates a PageBlobService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
-        '''
-        try:
-            from ..blob.pageblobservice import PageBlobService
-            return PageBlobService(self.account_name, self.account_key,
-                                   sas_token=self.sas_token,
-                                   is_emulated=self.is_emulated,
-                                   endpoint_suffix=self.endpoint_suffix)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_append_blob_service(self):
-        '''
-        Creates an AppendBlobService object with the settings specified in the
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
-        '''
-        try:
-            from ..blob.appendblobservice import AppendBlobService
-            return AppendBlobService(self.account_name, self.account_key,
-                                     sas_token=self.sas_token,
-                                     is_emulated=self.is_emulated,
-                                     endpoint_suffix=self.endpoint_suffix)
-        except ImportError:
-            raise Exception('The package azure-storage-blob is required. '
-                            + 'Please install it using "pip install azure-storage-blob"')
-
-    def create_queue_service(self):
-        '''
-        Creates a QueueService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
-        '''
-        try:
-            from ..queue.queueservice import QueueService
-            return QueueService(self.account_name, self.account_key,
-                                sas_token=self.sas_token,
-                                is_emulated=self.is_emulated,
-                                endpoint_suffix=self.endpoint_suffix)
-        except ImportError:
-            raise Exception('The package azure-storage-queue is required. '
-                            + 'Please install it using "pip install azure-storage-queue"')
-
-    def create_file_service(self):
-        '''
-        Creates a FileService object with the settings specified in the 
-        CloudStorageAccount.
-
-        :return: A service object.
-        :rtype: :class:`~azure.storage.file.fileservice.FileService`
-        '''
-        try:
-            from ..file.fileservice import FileService
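-            # Note: unlike the blob/queue factories above, is_emulated is not
-            # forwarded here; the track1 FileService constructor does not take
-            # that parameter (the legacy storage emulator had no File endpoint).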
-            return FileService(self.account_name, self.account_key,
-                               sas_token=self.sas_token,
-                               endpoint_suffix=self.endpoint_suffix)
-        except ImportError:
-            raise Exception('The package azure-storage-file is required. '
-                            + 'Please install it using "pip install azure-storage-file"')
-
-    def generate_shared_access_signature(self, services, resource_types,
-                                         permission, expiry, start=None,
-                                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = SharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(services, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
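
As a historical usage note for this now-removed surface, a sketch of the factory pattern CloudStorageAccount provided. It assumes azure-multiapi-storage <= 1.4.0 (which still ships the v2018_11_09 namespace); the account name and key are placeholders:

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2018_11_09.common import (
        CloudStorageAccount, Services, ResourceTypes, AccountPermissions,
    )

    # Factory with shared-key credentials (placeholder values).
    account = CloudStorageAccount(account_name='mystorageaccount',
                                  account_key='<base64-account-key>')

    # Account-level SAS covering blob and queue reads for one hour.
    sas_token = account.generate_shared_access_signature(
        services=Services.BLOB | Services.QUEUE,
        resource_types=ResourceTypes.OBJECT,
        permission=AccountPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # The token can then seed a second, key-less factory.
    blob_service = CloudStorageAccount(
        account_name='mystorageaccount', sas_token=sas_token,
    ).create_block_blob_service()
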
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/models.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/models.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,672 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info < (3,):
-    from collections import Iterable
-
-    _unicode_type = unicode
-else:
-    from collections.abc import Iterable
-
-    _unicode_type = str
-
-from ._error import (
-    _validate_not_none
-)
-
-
-class _HeaderDict(dict):
-    def __getitem__(self, index):
-        return super(_HeaderDict, self).__getitem__(index.lower())
-
-
-class _list(list):
-    '''Used so that additional properties can be set on the return list'''
-    pass
-
-
-class _dict(dict):
-    '''Used so that additional properties can be set on the return dictionary'''
-    pass
-
-
-class _OperationContext(object):
-    '''
-    Contains information that lasts the lifetime of an operation. This operation 
-    may span multiple calls to the Azure service.
-
-    :ivar bool location_lock: 
-        Whether the location should be locked for this operation.
-    :ivar str location: 
-        The location to lock to.
-    '''
-
-    def __init__(self, location_lock=False):
-        self.location_lock = location_lock
-        self.host_location = None
-
-
-class ListGenerator(Iterable):
-    '''
-    A generator object used to list storage resources. The generator will lazily 
-    follow the continuation tokens returned by the service and stop when all 
-    resources have been returned or max_results is reached.
-
-    If max_results is specified and the account has more than that number of 
-    resources, the generator will have a populated next_marker field once it 
-    finishes. This marker can be used to create a new generator if more 
-    results are desired.
-    '''
-
-    def __init__(self, resources, list_method, list_args, list_kwargs):
-        self.items = resources
-        self.next_marker = resources.next_marker
-
-        self._list_method = list_method
-        self._list_args = list_args
-        self._list_kwargs = list_kwargs
-
-    def __iter__(self):
-        # return results
-        for i in self.items:
-            yield i
-
-        while True:
-            # if no more results on the service, return
-            if not self.next_marker:
-                break
-
-            # update the marker args
-            self._list_kwargs['marker'] = self.next_marker
-
-            # handle max results, if present
-            max_results = self._list_kwargs.get('max_results')
-            if max_results is not None:
-                max_results = max_results - len(self.items)
-
-                # if we've reached max_results, return
-                # else, update the max_results arg
-                if max_results <= 0:
-                    break
-                else:
-                    self._list_kwargs['max_results'] = max_results
-
-            # get the next segment
-            resources = self._list_method(*self._list_args, **self._list_kwargs)
-            self.items = resources
-            self.next_marker = resources.next_marker
-
-            # return results
-            for i in self.items:
-                yield i
-
-
-class RetryContext(object):
-    '''
-    Contains the request and response information that can be used to determine 
-    whether and how to retry. This context is stored across retries and may be 
-    used to store other information relevant to the retry strategy.
-
-    :ivar ~azure.storage.common._http.HTTPRequest request:
-        The request sent to the storage service.
-    :ivar ~azure.storage.common._http.HTTPResponse response:
-        The response returned by the storage service.
-    :ivar LocationMode location_mode:
-        The location the request was sent to.
-    :ivar Exception exception:
-        The exception that just occurred. The type could either be AzureException (for HTTP errors),
-        or other Exception types from lower layers, which are kept unwrapped for easier processing.
-    :ivar bool is_emulated:
-        Whether retry is targeting the emulator. The default value is False.
-    :ivar int body_position:
-        The initial position of the body stream. It is useful when retries happen and we need to rewind the stream.
-    '''
-
-    def __init__(self):
-        self.request = None
-        self.response = None
-        self.location_mode = None
-        self.exception = None
-        self.is_emulated = False
-        self.body_position = None
-
-
-class LocationMode(object):
-    '''
-    Specifies the location the request should be sent to. This mode only applies 
-    for RA-GRS accounts which allow secondary read access. All other account types 
-    must use PRIMARY.
-    '''
-
-    PRIMARY = 'primary'
-    ''' Requests should be sent to the primary location. '''
-
-    SECONDARY = 'secondary'
-    ''' Requests should be sent to the secondary location, if possible. '''
-
-
-class RetentionPolicy(object):
-    '''
-    By default, Storage Analytics will not delete any logging or metrics data. Blobs
-    will continue to be written until the shared 20TB limit is
-    reached. Once the 20TB limit is reached, Storage Analytics will stop writing 
-    new data and will not resume until free space is available. This 20TB limit 
-    is independent of the total limit for your storage account.
-
-    There are two ways to delete Storage Analytics data: by manually making deletion 
-    requests or by setting a data retention policy. Manual requests to delete Storage 
-    Analytics data are billable, but delete requests resulting from a retention policy 
-    are not billable.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled: 
-            Indicates whether a retention policy is enabled for the 
-            storage service. If disabled, logging and metrics data will be retained 
-            infinitely by the service unless explicitly deleted.
-        :param int days: 
-            Required if enabled is true. Indicates the number of 
-            days that metrics or logging data should be retained. All data older 
-            than this value will be deleted. The minimum value you can specify is 1; 
-            the largest value is 365 (one year).
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class Logging(object):
-    '''
-    Storage Analytics logs detailed information about successful and failed requests 
-    to a storage service. This information can be used to monitor individual requests 
-    and to diagnose issues with a storage service. Requests are logged on a best-effort 
-    basis.
-
-    All logs are stored in block blobs in a container named $logs, which is
-    automatically created when Storage Analytics is enabled for a storage account. 
-    The $logs container is located in the blob namespace of the storage account. 
-    This container cannot be deleted once Storage Analytics has been enabled, though 
-    its contents can be deleted.
-
-    For more information, see  https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx
-    '''
-
-    def __init__(self, delete=False, read=False, write=False,
-                 retention_policy=None):
-        '''
-        :param bool delete: 
-            Indicates whether all delete requests should be logged.
-        :param bool read: 
-            Indicates whether all read requests should be logged.
-        :param bool write: 
-            Indicates whether all write requests should be logged.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("read", read)
-        _validate_not_none("write", write)
-        _validate_not_none("delete", delete)
-
-        self.version = u'1.0'
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class Metrics(object):
-    '''
-    Metrics include aggregated transaction statistics and capacity data about requests 
-    to a storage service. Transactions are reported at both the API operation level 
-    as well as at the storage service level, and capacity is reported at the storage 
-    service level. Metrics data can be used to analyze storage service usage, diagnose 
-    issues with requests made against the storage service, and to improve the 
-    performance of applications that use a service.
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx
-    '''
-
-    def __init__(self, enabled=False, include_apis=None,
-                 retention_policy=None):
-        '''
-        :param bool enabled: 
-            Indicates whether metrics are enabled for 
-            the service.
-        :param bool include_apis: 
-            Required if enabled is True. Indicates whether metrics 
-            should generate summary statistics for called API operations.
-        :param RetentionPolicy retention_policy: 
-            The retention policy for the metrics.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("include_apis", include_apis)
-
-        self.version = u'1.0'
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy if retention_policy else RetentionPolicy()
-
-
-class CorsRule(object):
-    '''
-    CORS is an HTTP feature that enables a web application running under one domain 
-    to access resources in another domain. Web browsers implement a security 
-    restriction known as same-origin policy that prevents a web page from calling 
-    APIs in a different domain; CORS provides a secure way to allow one domain 
-    (the origin domain) to call APIs in another domain. 
-
-    For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0,
-                 exposed_headers=None, allowed_headers=None):
-        '''
-        :param allowed_origins: 
-            A list of origin domains that will be allowed via CORS, or "*" to allow 
-            all domains. The list must contain at least one entry. Limited to 64
-            origin domains. Each allowed origin can have up to 256 characters.
-        :type allowed_origins: list(str)
-        :param allowed_methods:
-            A list of HTTP methods that are allowed to be executed by the origin. 
-            The list must contain at least one entry. For Azure Storage,
-            permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-        :type allowed_methods: list(str)
-        :param int max_age_in_seconds:
-            The number of seconds that the client/browser should cache a 
-            preflight response.
-        :param exposed_headers:
-            Defaults to an empty list. A list of response headers to expose to CORS 
-            clients. Limited to 64 defined headers and two prefixed headers. Each 
-            header can be up to 256 characters.
-        :type exposed_headers: list(str)
-        :param allowed_headers:
-            Defaults to an empty list. A list of headers allowed to be part of 
-            the cross-origin request. Limited to 64 defined headers and 2 prefixed 
-            headers. Each header can be up to 256 characters.
-        :type allowed_headers: list(str)
-        '''
-        _validate_not_none("allowed_origins", allowed_origins)
-        _validate_not_none("allowed_methods", allowed_methods)
-        _validate_not_none("max_age_in_seconds", max_age_in_seconds)
-
-        self.allowed_origins = allowed_origins if allowed_origins else list()
-        self.allowed_methods = allowed_methods if allowed_methods else list()
-        self.max_age_in_seconds = max_age_in_seconds
-        self.exposed_headers = exposed_headers if exposed_headers else list()
-        self.allowed_headers = allowed_headers if allowed_headers else list()
-
-
-class DeleteRetentionPolicy(object):
-    '''
-    To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later.
-    This class groups the settings related to delete retention policy.
-    '''
-
-    def __init__(self, enabled=False, days=None):
-        '''
-        :param bool enabled:
-            Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by delete operation.
-        :param int days:
-            Required only if Enabled is true. Indicates the number of days that a deleted blob should be retained.
-            All data older than this value will be permanently deleted.
-            The minimum value you can specify is 1; the largest value is 365.
-        '''
-        _validate_not_none("enabled", enabled)
-        if enabled:
-            _validate_not_none("days", days)
-
-        self.enabled = enabled
-        self.days = days
-
-
-class StaticWebsite(object):
-    '''
-    Class representing the service properties pertaining to static websites.
-    To set StaticWebsite, you must call Set Blob Service Properties using version 2018-03-28 or later.
-    '''
-
-    def __init__(self, enabled=False, index_document=None, error_document_404_path=None):
-        '''
-        :param bool enabled:
-            Required. True if static websites should be enabled on the blob service for the corresponding Storage Account.
-        :param str index_document:
-            Represents the name of the index document. This is commonly "index.html".
-        :param str error_document_404_path:
-            Represents the path to the error document that should be shown when an error 404 is issued,
-            in other words, when a browser requests a page that does not exist.
-        '''
-        _validate_not_none("enabled", enabled)
-
-        self.enabled = enabled
-        self.index_document = index_document
-        self.error_document_404_path = error_document_404_path
-
-
-class ServiceProperties(object):
-    ''' 
-    Returned by get_*_service_properties functions. Contains the properties of a 
-    storage service, including Analytics and CORS rules.
-
-    Azure Storage Analytics performs logging and provides metrics data for a storage 
-    account. You can use this data to trace requests, analyze usage trends, and 
-    diagnose issues with your storage account. To use Storage Analytics, you must 
-    enable it individually for each service you want to monitor.
-
-    The aggregated data is stored in a well-known blob (for logging) and in well-known 
-    tables (for metrics), which may be accessed using the Blob service and Table 
-    service APIs.
-
-    For an in-depth guide on using Storage Analytics and other tools to identify, 
-    diagnose, and troubleshoot Azure Storage-related issues, see 
-    http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/
-
-    For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx
-    '''
-
-    pass
-
-
-class ServiceStats(object):
-    ''' 
-    Returned by get_*_service_stats functions. Contains statistics related to 
-    replication for the given service. It is only available when read-access 
-    geo-redundant replication is enabled for the storage account.
-
-    :ivar GeoReplication geo_replication:
-        An object containing statistics related to replication for the given service.
-    '''
-    pass
-
-
-class GeoReplication(object):
-    ''' 
-    Contains statistics related to replication for the given service.
-
-    :ivar str status:
-        The status of the secondary location. Possible values are:
-            live: Indicates that the secondary location is active and operational.
-            bootstrap: Indicates initial synchronization from the primary location 
-                to the secondary location is in progress. This typically occurs 
-                when replication is first enabled.
-            unavailable: Indicates that the secondary location is temporarily 
-                unavailable.
-    :ivar date last_sync_time:
-        A GMT date value, to the second. All primary writes preceding this value 
-        are guaranteed to be available for read operations at the secondary. 
-        Primary writes after this point in time may or may not be available for 
-        reads. The value may be empty if LastSyncTime is not available. This can 
-        happen if the replication status is bootstrap or unavailable. Although 
-        geo-replication is continuously enabled, the LastSyncTime result may 
-        reflect a cached value from the service that is refreshed every few minutes.
-    '''
-    pass
-
-
-class AccessPolicy(object):
-    '''
-    Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and 
-    permissions for the Shared Access Signatures with which it's associated. 
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit 
-    them from the URL for the Shared Access Signature. Doing so permits you to 
-    modify the associated signature's behavior at any time, as well as to revoke 
-    it. Or you can specify one or more of the access policy parameters within 
-    the stored access policy, and the others on the URL. Finally, you can 
-    specify all of the parameters on the URL. In this case, you can use the 
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must 
-    include all fields required to authenticate the signature. If any required 
-    fields are missing, the request will fail. Likewise, if a field is specified 
-    both in the Shared Access Signature URL and in the stored access policy, the 
-    request will fail with status code 400 (Bad Request).
-    '''
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        '''
-        :param str permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        '''
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class Protocol(object):
-    '''
-    Specifies the protocol permitted for a SAS token. Note that HTTP only is 
-    not allowed.
-    '''
-
-    HTTPS = 'https'
-    ''' Allow HTTPS requests only. '''
-
-    HTTPS_HTTP = 'https,http'
-    ''' Allow HTTP and HTTPS requests. '''
-
-
-class ResourceTypes(object):
-    '''
-    Specifies the resource types that are accessible with the account SAS.
-
-    :ivar ResourceTypes ResourceTypes.CONTAINER:
-        Access to container-level APIs (e.g., Create/Delete Container, 
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories) 
-    :ivar ResourceTypes ResourceTypes.OBJECT:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    :ivar ResourceTypes ResourceTypes.SERVICE:
-        Access to service-level APIs (e.g., Get/Set Service Properties, 
-        Get Service Stats, List Containers/Queues/Shares)
-    '''
-
-    def __init__(self, service=False, container=False, object=False, _str=None):
-        '''
-        :param bool service:
-            Access to service-level APIs (e.g., Get/Set Service Properties, 
-            Get Service Stats, List Containers/Queues/Shares)
-        :param bool container:
-            Access to container-level APIs (e.g., Create/Delete Container, 
-            Create/Delete Queue, Create/Delete Share,
-            List Blobs/Files and Directories) 
-        :param bool object:
-            Access to object-level APIs for blobs, queue messages, and
-            files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-        :param str _str: 
-            A string representing the resource types.
-        '''
-        if not _str:
-            _str = ''
-        self.service = service or ('s' in _str)
-        self.container = container or ('c' in _str)
-        self.object = object or ('o' in _str)
-
-    def __or__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return ResourceTypes(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-
-ResourceTypes.SERVICE = ResourceTypes(service=True)
-ResourceTypes.CONTAINER = ResourceTypes(container=True)
-ResourceTypes.OBJECT = ResourceTypes(object=True)
-
-
-class Services(object):
-    '''
-    Specifies the services accessible with the account SAS.
-
-    :ivar Services Services.BLOB: The blob service.
-    :ivar Services Services.FILE: The file service.
-    :ivar Services Services.QUEUE: The queue service.
-    :ivar Services Services.TABLE: The table service.
-    '''
-
-    def __init__(self, blob=False, queue=False, file=False, table=False, _str=None):
-        '''
-        :param bool blob:
-            Access to any blob service, for example, the `.BlockBlobService`
-        :param bool queue:
-            Access to the `.QueueService`
-        :param bool file:
-            Access to the `.FileService`
-        :param bool table:
-            Access to the TableService
-        :param str _str: 
-            A string representing the services.
-        '''
-        if not _str:
-            _str = ''
-        self.blob = blob or ('b' in _str)
-        self.queue = queue or ('q' in _str)
-        self.file = file or ('f' in _str)
-        self.table = table or ('t' in _str)
-
-    def __or__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return Services(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('t' if self.table else '') +
-                ('f' if self.file else ''))
-
-
-Services.BLOB = Services(blob=True)
-Services.QUEUE = Services(queue=True)
-Services.TABLE = Services(table=True)
-Services.FILE = Services(file=True)
-
-
-class AccountPermissions(object):
-    '''
-    :class:`~AccountPermissions` class to be used with generate_shared_access_signature
-    method and for the AccessPolicies used with set_*_acl. There are two types of 
-    SAS which may be used to grant resource access. One is to grant access to a 
-    specific resource (resource-specific). Another is to grant access to the 
-    entire service for a specific account and allow certain operations based on 
-    perms found here.
-
-    :ivar AccountPermissions AccountPermissions.ADD:
-        Valid for the following Object resource types only: queue messages and append blobs.
-    :ivar AccountPermissions AccountPermissions.CREATE:
-        Valid for the following Object resource types only: blobs and files. Users 
-        can create new blobs or files, but may not overwrite existing blobs or files. 
-    :ivar AccountPermissions AccountPermissions.DELETE:
-        Valid for Container and Object resource types, except for queue messages. 
-    :ivar AccountPermissions AccountPermissions.LIST:
-        Valid for Service and Container resource types only. 
-    :ivar AccountPermissions AccountPermissions.PROCESS:
-        Valid for the following Object resource type only: queue messages. 
-    :ivar AccountPermissions AccountPermissions.READ:
-        Valid for all signed resource types (Service, Container, and Object).
-        Permits read permissions to the specified resource type. 
-    :ivar AccountPermissions AccountPermissions.UPDATE:
-        Valid for the following Object resource types only: queue messages.
-    :ivar AccountPermissions AccountPermissions.WRITE:
-        Valid for all signed resource types (Service, Container, and Object).
-        Permits write permissions to the specified resource type. 
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 add=False, create=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Valid for all signed resource types (Service, Container, and Object).
-            Permits read permissions to the specified resource type.
-        :param bool write:
-            Valid for all signed resource types (Service, Container, and Object).
-            Permits write permissions to the specified resource type.
-        :param bool delete: 
-            Valid for Container and Object resource types, except for queue messages.
-        :param bool list:
-            Valid for Service and Container resource types only.
-        :param bool add:
-            Valid for the following Object resource types only: queue messages, and append blobs.
-        :param bool create:
-            Valid for the following Object resource types only: blobs and files. 
-            Users can create new blobs or files, but may not overwrite existing 
-            blobs or files.
-        :param bool update:
-            Valid for the following Object resource types only: queue messages.
-        :param bool process:
-            Valid for the following Object resource type only: queue messages.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-        self.add = add or ('a' in _str)
-        self.create = create or ('c' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return AccountPermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else '') +
-                ('a' if self.add else '') +
-                ('c' if self.create else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-AccountPermissions.READ = AccountPermissions(read=True)
-AccountPermissions.WRITE = AccountPermissions(write=True)
-AccountPermissions.DELETE = AccountPermissions(delete=True)
-AccountPermissions.LIST = AccountPermissions(list=True)
-AccountPermissions.ADD = AccountPermissions(add=True)
-AccountPermissions.CREATE = AccountPermissions(create=True)
-AccountPermissions.UPDATE = AccountPermissions(update=True)
-AccountPermissions.PROCESS = AccountPermissions(process=True)
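
All of the flag classes above (ResourceTypes, Services, AccountPermissions) share one composition idiom: boolean fields, __or__/__add__ that concatenate the canonical letter strings, and a _str constructor argument that parses them back. A short sketch of that round-trip, assuming a pre-1.5.0 install that still ships this module:

    from azure.multiapi.storage.v2018_11_09.common.models import (
        AccountPermissions, ResourceTypes, Services,
    )

    # Combining flags concatenates their letter forms...
    perms = AccountPermissions.READ | AccountPermissions.WRITE | AccountPermissions.LIST
    assert str(perms) == 'rwl'

    # ...and the _str argument parses a letter string back into flags.
    assert ResourceTypes(_str='sco').container is True
    assert str(Services(blob=True) + Services(queue=True)) == 'bq'
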
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/retry.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/retry.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/retry.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/retry.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,306 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from abc import ABCMeta
-from math import pow
-import random
-from io import (SEEK_SET, UnsupportedOperation)
-
-from .models import LocationMode
-from ._constants import (
-    DEV_ACCOUNT_NAME,
-    DEV_ACCOUNT_SECONDARY_NAME
-)
-
-
-class _Retry(object):
-    '''
-    The base class for Exponential and Linear retries containing shared code.
-    '''
-    __metaclass__ = ABCMeta
-
-    def __init__(self, max_attempts, retry_to_secondary):
-        '''
-        Constructs a base retry object.
-
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        '''
-        self.max_attempts = max_attempts
-        self.retry_to_secondary = retry_to_secondary
-
-    def _should_retry(self, context):
-        '''
-        A function which determines whether or not to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :return: 
-            A boolean indicating whether or not to retry the request.
-        :rtype: bool
-        '''
-        # If max attempts are reached, do not retry.
-        if context.count >= self.max_attempts:
-            return False
-
-        status = None
-        if context.response and context.response.status:
-            status = context.response.status
-
-        if status is None:
-            '''
-            If status is None, retry as this request triggered an exception. For 
-            example, network issues would trigger this.
-            '''
-            return True
-        elif 200 <= status < 300:
-            '''
-            This method is called after a successful response, meaning we failed 
-            during the response body download or parsing. So, success codes should 
-            be retried.
-            '''
-            return True
-        elif 300 <= status < 500:
-            '''
-            An exception occurred, but in most cases it was expected. Examples could
-            include a 409 Conflict or 412 Precondition Failed.
-            '''
-            if status == 404 and context.location_mode == LocationMode.SECONDARY:
-                # Response code 404 should be retried if secondary was used.
-                return True
-            if status == 408:
-                # Response code 408 is a timeout and should be retried.
-                return True
-            return False
-        elif status >= 500:
-            '''
-            Response codes above 500 with the exception of 501 Not Implemented and 
-            505 Version Not Supported indicate a server issue and should be retried.
-            '''
-            if status == 501 or status == 505:
-                return False
-            return True
-        else:
-            # If something else happened, it's unexpected. Retry.
-            return True
-
-    def _set_next_host_location(self, context):
-        '''
-        A function which sets the next host location on the request, if applicable. 
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context containing the previous host location and the request 
-            to evaluate and possibly modify.
-        '''
-        if len(context.request.host_locations) > 1:
-            # If there's more than one possible location, retry to the alternative
-            if context.location_mode == LocationMode.PRIMARY:
-                context.location_mode = LocationMode.SECONDARY
-
-                # if targeting the emulator (with path style), change path instead of host
-                if context.is_emulated:
-                    # replace the first instance of primary account name with the secondary account name
-                    context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1)
-                else:
-                    context.request.host = context.request.host_locations.get(context.location_mode)
-            else:
-                context.location_mode = LocationMode.PRIMARY
-
-                # if targeting the emulator (with path style), change path instead of host
-                if context.is_emulated:
-                    # replace the first instance of secondary account name with the primary account name
-                    context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1)
-                else:
-                    context.request.host = context.request.host_locations.get(context.location_mode)
-
-    def _retry(self, context, backoff):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context: 
-            The retry context. This contains the request, response, and other data 
-            which can be used to determine whether or not to retry.
-        :param function() backoff:
-            A function which returns the backoff time if a retry is to be performed.
-        :return: 
-            An integer indicating how long to wait before retrying the request, 
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        # If the context does not contain a count parameter, this request has not 
-        # been retried yet. Add the count parameter to track the number of retries.
-        if not hasattr(context, 'count'):
-            context.count = 0
-
-        # Determine whether to retry, and if so increment the count, modify the 
-        # request as desired, and return the backoff.
-        if self._should_retry(context):
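-            # The backoff is computed with the pre-increment count, so the first
-            # retry is based on count == 0 (e.g. initial_backoff alone for
-            # ExponentialRetry).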
-            backoff_interval = backoff(context)
-            context.count += 1
-
-            # If retry to secondary is enabled, attempt to change the host if the 
-            # request allows it
-            if self.retry_to_secondary:
-                self._set_next_host_location(context)
-
-            # rewind the request body if it is a stream
-            if hasattr(context.request, 'body') and hasattr(context.request.body, 'read'):
-                # if no position was saved, the body cannot be rewound and retry would not work
-                if context.body_position is None:
-                    return None
-                else:
-                    try:
-                        # attempt to rewind the body to the initial position
-                        context.request.body.seek(context.body_position, SEEK_SET)
-                    except UnsupportedOperation:
-                        # if body is not seekable, then retry would not work
-                        return None
-
-            return backoff_interval
-
-        return None
-
-
-class ExponentialRetry(_Retry):
-    '''
-    Exponential retry.
-    '''
-
-    def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3,
-                 retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for 
-        the first retry. Subsequent retries are retried after initial_backoff + 
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the 
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff: 
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the 
-            first retry.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(_Retry):
-    '''
-    Linear retry.
-    '''
-
-    def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3):
-        '''
-        Constructs a Linear retry object.
-
-        :param int backoff: 
-            The backoff interval, in seconds, between retries.
-        :param int max_attempts: 
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should 
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.backoff = backoff
-        self.max_attempts = max_attempts
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(max_attempts, retry_to_secondary)
-
-    def retry(self, context):
-        '''
-        A function which determines whether and how to retry.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context. This contains the request, response, and other data
-            which can be used to determine whether or not to retry.
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        return self._retry(context, self._backoff)
-
-    def _backoff(self, context):
-        '''
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        '''
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
-        self.random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(self.random_range_start, self.random_range_end)
-
-
-def no_retry(context):
-    '''
-    Specifies never to retry.
-
-    :param ~azure.storage.models.RetryContext context: 
-        The retry context.
-    :return: 
-        Always returns None to indicate never to retry.
-    :rtype: None
-    '''
-    return None
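-
-
-# --------------------------------------------------------------------------
-# Illustrative usage sketch (not part of the original module): how these
-# retry policies are wired into a service object. Any client exposing the
-# `retry` attribute described on StorageClient works the same way; `service`
-# here is hypothetical.
-# --------------------------------------------------------------------------
-def _example_configure_retry(service):
-    # exponential back-off with the class defaults; `.retry` is the bound
-    # callable the client invokes with a RetryContext after a failed attempt
-    service.retry = ExponentialRetry().retry
-
-    # or: a fixed 5-second interval, at most 2 attempts, +/- 3s of jitter
-    service.retry = LinearRetry(backoff=5, max_attempts=2).retry
-
-    # or: disable retries entirely
-    service.retry = no_retry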
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,180 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from datetime import date
-
-from ._common_conversion import (
-    _sign_string,
-    _to_str,
-)
-from ._constants import DEFAULT_X_MS_VERSION
-from ._serialization import (
-    url_quote,
-    _to_utc_datetime,
-)
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account access
-    signature tokens with an account name and account key. Users can either
-    use the factory or can construct the appropriate service and use the 
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service 
-        or to create a new account object.
-
-        :param Services services:
-            Specifies the services accessible with the account SAS. You can 
-            combine values to provide access to more than one service. 
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account 
-            SAS. You can combine values to provide access to more than one 
-            resource type. 
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy. You can combine 
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the account SAS.
-            The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
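-# Illustrative usage sketch (not part of the original module): generating an
-# account SAS with the factory above. The permission, service and
-# resource-type values are shown as raw strings for brevity; the
-# AccountPermissions/Services/ResourceTypes models render to the same strings.
-def _example_generate_account_sas():
-    from datetime import datetime, timedelta
-    sas_factory = SharedAccessSignature('myaccount', 'bXlrZXk=')
-    return sas_factory.generate_account(
-        services='b',            # blob service
-        resource_types='co',     # container- and object-level operations
-        permission='rl',         # read + list
-        expiry=datetime.utcnow() + timedelta(hours=1))
-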
-
-class _QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _to_str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(_QueryStringConstants.SIGNED_START, start)
-        self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(_QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, id):
-        self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
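-
-
-# Illustrative sketch (not part of the original module): what _sign_string
-# amounts to in this SDK family -- base64-decode the account key, HMAC-SHA256
-# the string-to-sign, and base64-encode the digest into the 'sig' query
-# value. The real helper lives in _common_conversion; this standalone version
-# is for context only.
-def _example_sign_string(account_key, string_to_sign):
-    import base64
-    import hashlib
-    import hmac
-    key = base64.b64decode(account_key)
-    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
-    return base64.b64encode(digest).decode('utf-8')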
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/storageclient.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/storageclient.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/storageclient.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/storageclient.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,440 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import requests
-from abc import ABCMeta
-import logging
-from time import sleep
-import sys
-
-from azure.common import (
-    AzureException,
-    AzureHttpError,
-)
-
-from ._constants import (
-    DEFAULT_SOCKET_TIMEOUT,
-    DEFAULT_X_MS_VERSION,
-    DEFAULT_USER_AGENT_STRING,
-    USER_AGENT_STRING_PREFIX,
-    USER_AGENT_STRING_SUFFIX,
-    _AUTHORIZATION_HEADER_NAME,
-    _REDACTED_VALUE,
-    _COPY_SOURCE_HEADER_NAME,
-)
-from ._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _http_error_handler,
-    _wrap_exception,
-    AzureSigningError,
-)
-from ._http import HTTPError
-from ._http.httpclient import _HTTPClient
-from ._serialization import (
-    _update_request,
-    _add_date_header,
-)
-from .models import (
-    RetryContext,
-    LocationMode,
-    _OperationContext,
-)
-from .retry import ExponentialRetry
-from io import UnsupportedOperation
-from .sharedaccesssignature import _QueryStringConstants
-
-if sys.version_info >= (3,):
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-else:
-    from urlparse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-    from urllib import urlencode
-logger = logging.getLogger(__name__)
-
-
-class StorageClient(object):
-    '''
-    This is the base class for service objects. Service objects are used to do 
-    all requests to Storage. This class cannot be instantiated directly.
-
-    :ivar str account_name:
-        The storage account name. This is used to authenticate requests 
-        signed with an account key and to construct the storage endpoint. It 
-        is required unless a connection string is given, or if a custom 
-        domain is used with anonymous authentication.
-    :ivar str account_key:
-        The storage account key. This is used for shared key authentication. 
-        If neither account key nor sas token is specified, anonymous access 
-        will be used.
-    :ivar str sas_token:
-        A shared access signature token to use to authenticate requests 
-        instead of the account key. If account key and sas token are both 
-        specified, account key will be used to sign. If neither are 
-        specified, anonymous access will be used.
-    :ivar str primary_endpoint:
-        The endpoint to send storage requests to.
-    :ivar str secondary_endpoint:
-        The secondary endpoint to read storage data from. This will only be a 
-        valid endpoint if the storage account used is RA-GRS and thus allows 
-        reading from secondary.
-    :ivar function(context) retry:
-        A function which determines whether to retry. Takes as a parameter a 
-        :class:`~azure.storage.common.models.RetryContext` object. Returns the number
-        of seconds to wait before retrying the request, or None to indicate not 
-        to retry.
-    :ivar ~azure.storage.common.models.LocationMode location_mode:
-        The host location to use to make requests. Defaults to LocationMode.PRIMARY.
-        Note that this setting only applies to RA-GRS accounts as other account 
-        types do not allow reading from secondary. If the location_mode is set to 
-        LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. 
-        Write requests will continue to be sent to primary.
-    :ivar str protocol:
-        The protocol to use for requests. Defaults to https.
-    :ivar requests.Session request_session:
-        The session object to use for http requests.
-    :ivar function(request) request_callback:
-        A function called immediately before each request is sent. This function 
-        takes as a parameter the request object and returns nothing. It may be 
-        used to add custom headers or log request data.
-    :ivar function(response) response_callback:
-        A function called immediately after each response is received. This 
-        function takes as a parameter the response object and returns nothing. 
-        It may be used to log response data.
-    :ivar function(retry_context) retry_callback:
-        A function called immediately after retry evaluation is performed. This 
-        function takes as a parameter the retry context object and returns nothing. 
-        It may be used to detect retries and log context information.
-    '''
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, connection_params):
-        '''
-        :param obj connection_params: The parameters to use to construct the client.
-        '''
-        self.account_name = connection_params.account_name
-        self.account_key = connection_params.account_key
-        self.sas_token = connection_params.sas_token
-        self.token_credential = connection_params.token_credential
-        self.is_emulated = connection_params.is_emulated
-
-        self.primary_endpoint = connection_params.primary_endpoint
-        self.secondary_endpoint = connection_params.secondary_endpoint
-
-        protocol = connection_params.protocol
-        request_session = connection_params.request_session or requests.Session()
-        socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT
-        self._httpclient = _HTTPClient(
-            protocol=protocol,
-            session=request_session,
-            timeout=socket_timeout,
-        )
-
-        self.retry = ExponentialRetry().retry
-        self.location_mode = LocationMode.PRIMARY
-
-        self.request_callback = None
-        self.response_callback = None
-        self.retry_callback = None
-        self._X_MS_VERSION = DEFAULT_X_MS_VERSION
-        self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING
-
-    def _update_user_agent_string(self, service_package_version):
-        self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX,
-                                                   service_package_version,
-                                                   USER_AGENT_STRING_SUFFIX)
-
-    @property
-    def socket_timeout(self):
-        return self._httpclient.timeout
-
-    @socket_timeout.setter
-    def socket_timeout(self, value):
-        self._httpclient.timeout = value
-
-    @property
-    def protocol(self):
-        return self._httpclient.protocol
-
-    @protocol.setter
-    def protocol(self, value):
-        self._httpclient.protocol = value
-
-    @property
-    def request_session(self):
-        return self._httpclient.session
-
-    @request_session.setter
-    def request_session(self, value):
-        self._httpclient.session = value
-
-    def set_proxy(self, host, port, user=None, password=None):
-        '''
-        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
-
-        :param str host: Address of the proxy. Ex: '192.168.0.100'
-        :param int port: Port of the proxy. Ex: 6000
-        :param str user: User for proxy authorization.
-        :param str password: Password for proxy authorization.
-        '''
-        self._httpclient.set_proxy(host, port, user, password)
-
-    def _get_host_locations(self, primary=True, secondary=False):
-        locations = {}
-        if primary:
-            locations[LocationMode.PRIMARY] = self.primary_endpoint
-        if secondary:
-            locations[LocationMode.SECONDARY] = self.secondary_endpoint
-        return locations
-
-    def _apply_host(self, request, operation_context, retry_context):
-        if operation_context.location_lock and operation_context.host_location:
-            # If this is a location locked operation and the location is set, 
-            # override the request location and host_location.
-            request.host_locations = operation_context.host_location
-            request.host = list(operation_context.host_location.values())[0]
-            retry_context.location_mode = list(operation_context.host_location.keys())[0]
-        elif len(request.host_locations) == 1:
-            # If only one location is allowed, use that location.
-            request.host = list(request.host_locations.values())[0]
-            retry_context.location_mode = list(request.host_locations.keys())[0]
-        else:
-            # If multiple locations are possible, choose based on the location mode.
-            request.host = request.host_locations.get(self.location_mode)
-            retry_context.location_mode = self.location_mode
-
-    @staticmethod
-    def extract_date_and_request_id(retry_context):
-        if getattr(retry_context, 'response', None) is None:
-            return ""
-        resp = retry_context.response
-
-        if 'date' in resp.headers and 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Timestamp={0}, Server-Request-ID={1}",
-                              resp.headers['date'], resp.headers['x-ms-request-id'])
-        elif 'date' in resp.headers:
-            return str.format("Server-Timestamp={0}", resp.headers['date'])
-        elif 'x-ms-request-id' in resp.headers:
-            return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id'])
-        else:
-            return ""
-
-    @staticmethod
-    def _scrub_headers(headers):
-        # make a copy to avoid contaminating the request
-        clean_headers = headers.copy()
-
-        if _AUTHORIZATION_HEADER_NAME in clean_headers:
-            clean_headers[_AUTHORIZATION_HEADER_NAME] = _REDACTED_VALUE
-
-        # in case of copy operations, there could be a SAS signature present in the header value
-        if _COPY_SOURCE_HEADER_NAME in clean_headers \
-                and _QueryStringConstants.SIGNED_SIGNATURE + "=" in clean_headers[_COPY_SOURCE_HEADER_NAME]:
-            # take the url apart and scrub away the signed signature
-            scheme, netloc, path, params, query, fragment = urlparse(clean_headers[_COPY_SOURCE_HEADER_NAME])
-            parsed_qs = dict(parse_qsl(query))
-            parsed_qs[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE
-
-            # the SAS needs to be put back together
-            clean_headers[_COPY_SOURCE_HEADER_NAME] = urlunparse(
-                (scheme, netloc, path, params, urlencode(parsed_qs), fragment))
-        return clean_headers
-
-    @staticmethod
-    def _scrub_query_parameters(query):
-        # make a copy to avoid contaminating the request
-        clean_queries = query.copy()
-
-        if _QueryStringConstants.SIGNED_SIGNATURE in clean_queries:
-            clean_queries[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE
-        return clean_queries
-
-    def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None):
-        '''
-        Sends the request and returns the response. Catches HTTPError and
-        hands it to the error handler.
-        '''
-        operation_context = operation_context or _OperationContext()
-        retry_context = RetryContext()
-        retry_context.is_emulated = self.is_emulated
-
-        # if request body is a stream, we need to remember its current position in case retries happen
-        if hasattr(request.body, 'read'):
-            try:
-                retry_context.body_position = request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-
-        # Apply the appropriate host based on the location mode
-        self._apply_host(request, operation_context, retry_context)
-
-        # Apply common settings to the request
-        _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
-        client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
-
-        while True:
-            try:
-                try:
-                    # Execute the request callback 
-                    if self.request_callback:
-                        self.request_callback(request)
-
-                    # Add date and auth after the callback so date doesn't get too old and 
-                    # authentication is still correct if signed headers are added in the request 
-                    # callback. This also ensures retry policies with long back-offs 
-                    # will work as it resets the time-sensitive headers.
-                    _add_date_header(request)
-
-                    try:
-                        # request can be signed individually
-                        self.authentication.sign_request(request)
-                    except AttributeError:
-                        # session can also be signed
-                        self.request_session = self.authentication.signed_session(self.request_session)
-
-                    # Set the request context
-                    retry_context.request = request
-
-                    # Log the request before it goes out
-                    # Avoid unnecessary scrubbing if the logger is not on
-                    if logger.isEnabledFor(logging.INFO):
-                        logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
-                                    client_request_id_prefix,
-                                    request.method,
-                                    request.path,
-                                    self._scrub_query_parameters(request.query),
-                                    str(self._scrub_headers(request.headers)).replace('\n', ''))
-
-                    # Perform the request
-                    response = self._httpclient.perform_request(request)
-
-                    # Execute the response callback
-                    if self.response_callback:
-                        self.response_callback(response)
-
-                    # Set the response context
-                    retry_context.response = response
-
-                    # Log the response when it comes back
-                    logger.info("%s Receiving Response: "
-                                "%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
-                                client_request_id_prefix,
-                                self.extract_date_and_request_id(retry_context),
-                                response.status,
-                                response.message,
-                                str(response.headers).replace('\n', ''))
-
-                    # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
-                    if response.status >= 300:
-                        # This exception will be caught by the general error handler
-                        # and raised as an azure http exception
-                        _http_error_handler(
-                            HTTPError(response.status, response.message, response.headers, response.body))
-
-                    # Parse the response
-                    if parser:
-                        if parser_args:
-                            args = [response]
-                            args.extend(parser_args)
-                            return parser(*args)
-                        else:
-                            return parser(response)
-                    else:
-                        return
-                except AzureException as ex:
-                    retry_context.exception = ex
-                    raise ex
-                except Exception as ex:
-                    retry_context.exception = ex
-                    raise _wrap_exception(ex, AzureException)
-
-            except AzureException as ex:
-                # only parse the strings used for logging if logging is at least enabled for CRITICAL
-                exception_str_in_one_line = ''
-                status_code = ''
-                timestamp_and_request_id = ''
-                if logger.isEnabledFor(logging.CRITICAL):
-                    exception_str_in_one_line = str(ex).replace('\n', '')
-                    status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
-                    timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
-
-                # if the http error was expected, we should short-circuit
-                if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors:
-                    logger.info("%s Received expected http error: "
-                                "%s, HTTP status code=%s, Exception=%s.",
-                                client_request_id_prefix,
-                                timestamp_and_request_id,
-                                status_code,
-                                exception_str_in_one_line)
-                    raise ex
-                elif isinstance(ex, AzureSigningError):
-                    logger.info("%s Unable to sign the request: Exception=%s.",
-                                client_request_id_prefix,
-                                exception_str_in_one_line)
-                    raise ex
-
-                logger.info("%s Operation failed: checking if the operation should be retried. "
-                            "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
-                            client_request_id_prefix,
-                            retry_context.count if hasattr(retry_context, 'count') else 0,
-                            timestamp_and_request_id,
-                            status_code,
-                            exception_str_in_one_line)
-
-                # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
-                # will not be resolved with retries.
-                if str(ex) == _ERROR_DECRYPTION_FAILURE:
-                    logger.error("%s Encountered decryption failure: this cannot be retried. "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-
-                # Determine whether a retry should be performed and if so, how 
-                # long to wait before performing retry.
-                retry_interval = self.retry(retry_context)
-                if retry_interval is not None:
-                    # Execute the callback
-                    if self.retry_callback:
-                        self.retry_callback(retry_context)
-
-                    logger.info(
-                        "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
-                        client_request_id_prefix,
-                        retry_context.count,
-                        retry_interval)
-
-                    # Sleep for the desired retry interval
-                    sleep(retry_interval)
-                else:
-                    logger.error("%s Retry policy did not allow for a retry: "
-                                 "%s, HTTP status code=%s, Exception=%s.",
-                                 client_request_id_prefix,
-                                 timestamp_and_request_id,
-                                 status_code,
-                                 exception_str_in_one_line)
-                    raise ex
-            finally:
-                # If this is a location locked operation and the location is not set, 
-                # this is the first request of that operation. Set the location to 
-                # be used for subsequent requests in the operation.
-                if operation_context.location_lock and not operation_context.host_location:
-                    # note: to cover the emulator scenario, the host_location is grabbed
-                    # from request.host_locations (which includes the dev account name)
-                    # instead of request.host (which at this point no longer includes the dev account name)
-                    operation_context.host_location = {
-                        retry_context.location_mode: request.host_locations[retry_context.location_mode]}
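-
-
-# Illustrative usage sketch (not part of the original module): wiring up the
-# request, response and retry hooks documented on StorageClient. `service`
-# stands in for any concrete subclass instance.
-def _example_attach_callbacks(service):
-    def on_request(request):
-        # invoked immediately before each request is signed and sent
-        request.headers['x-ms-example'] = 'demo'
-
-    def on_response(response):
-        # invoked immediately after each response is received
-        print(response.status)
-
-    def on_retry(retry_context):
-        # invoked after each retry evaluation; handy for logging retries
-        print('retry count:', retry_context.count)
-
-    service.request_callback = on_request
-    service.response_callback = on_response
-    service.retry_callback = on_retry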
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/tokencredential.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/tokencredential.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/common/tokencredential.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/common/tokencredential.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import requests
-
-
-class TokenCredential(object):
-    """
-    Represents a token credential that is used to authorize HTTPS requests.
-    The token can be updated by the user.
-
-    :ivar str token:
-        The authorization token. It can be set by the user at any point in a thread-safe way.
-    """
-
-    def __init__(self, initial_value=None):
-        """
-        :param initial_value: initial value for the token.
-        """
-        self.token = initial_value
-
-    def signed_session(self, session=None):
-        """
-        Signs the requests session with the token. This method is called every time a request goes on the wire.
-        The user is responsible for updating the token with the preferred tool/SDK.
-        In general there are two options:
-            - override this method to update the token in a preferred way and set Authorization header on session
-            - not override this method, and have a timer that triggers periodically to update the token on this class
-
-        The second option is recommended as it tends to be more performance-friendly.
-
-        :param session: The session to configure for authentication
-        :type session: requests.Session
-        :rtype: requests.Session
-        """
-        session = session or requests.Session()
-        session.headers['Authorization'] = "Bearer {}".format(self.token)
-
-        return session
-
-    def update_token(self, new_value):
-        """
-        Sets a new value as the token. A distinct method name is used because
-        the ``token`` attribute assigned in __init__ would otherwise shadow
-        this method.
-
-        :param new_value: new value to be set as the token.
-        """
-        self.token = new_value
\ No newline at end of file
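-
-
-# Illustrative usage sketch (not part of the original module): keeping a
-# TokenCredential fresh with a periodic timer, as recommended in
-# signed_session. The fetch_token callable is hypothetical; substitute your
-# own OAuth flow.
-def _example_refreshing_credential(fetch_token, interval_seconds=1800):
-    import threading
-    credential = TokenCredential(fetch_token())
-
-    def _refresh():
-        credential.token = fetch_token()
-        threading.Timer(interval_seconds, _refresh).start()
-
-    threading.Timer(interval_seconds, _refresh).start()
-    return credential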
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .fileservice import FileService
-from .models import (
-    Share,
-    ShareProperties,
-    File,
-    FileProperties,
-    Directory,
-    DirectoryProperties,
-    FileRange,
-    ContentSettings,
-    CopyProperties,
-    SharePermissions,
-    FilePermissions,
-    DeleteSnapshot,
-)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_constants.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '2.0.1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2018-11-09'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,308 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-from .models import (
-    Share,
-    Directory,
-    File,
-    Handle,
-    FileProperties,
-    FileRange,
-    ShareProperties,
-    DirectoryProperties,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _parse_properties,
-    _parse_metadata,
-)
-from ..common._error import _validate_content_match
-from ..common._common_conversion import (
-    _get_content_md5,
-    _to_str,
-)
-
-
-def _parse_snapshot_share(response, name):
-    '''
-    Extracts the snapshot value from the x-ms-snapshot response header.
-    '''
-    snapshot = response.headers.get('x-ms-snapshot')
-
-    return _parse_share(response, name, snapshot)
-
-
-def _parse_share(response, name, snapshot=None):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, ShareProperties)
-    return Share(name, props, metadata, snapshot)
-
-
-def _parse_directory(response, name):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, DirectoryProperties)
-    return Directory(name, props, metadata)
-
-
-def _parse_file(response, name, validate_content=False):
-    if response is None:
-        return None
-
-    metadata = _parse_metadata(response)
-    props = _parse_properties(response, FileProperties)
-
-    # For range gets, only look at 'x-ms-content-md5' for overall MD5
-    content_settings = getattr(props, 'content_settings')
-    if 'content-range' in response.headers:
-        if 'x-ms-content-md5' in response.headers:
-            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5']))
-        else:
-            delattr(content_settings, 'content_md5')
-
-    if validate_content:
-        computed_md5 = _get_content_md5(response.body)
-        _validate_content_match(response.headers['content-md5'], computed_md5)
-
-    return File(name, response.body, props, metadata)
-
-
-def _convert_xml_to_shares(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults AccountName="https://myaccount.file.core.windows.net">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Shares>
-        <Share>
-          <Name>share-name</Name>
-          <Snapshot>date-time-value</Snapshot>
-          <Properties>
-            <Last-Modified>date/time-value</Last-Modified>
-            <Etag>etag</Etag>
-            <Quota>max-share-size</Quota>
-          </Properties>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Share>
-      </Shares>
-      <NextMarker>marker-value</NextMarker>
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    shares = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(shares, 'next_marker', next_marker)
-
-    shares_element = list_element.find('Shares')
-
-    for share_element in shares_element.findall('Share'):
-        # Name element
-        share = Share()
-        share.name = share_element.findtext('Name')
-
-        # Snapshot
-        share.snapshot = share_element.findtext('Snapshot')
-
-        # Metadata
-        metadata_root_element = share_element.find('Metadata')
-        if metadata_root_element is not None:
-            share.metadata = dict()
-            for metadata_element in metadata_root_element:
-                share.metadata[metadata_element.tag] = metadata_element.text
-
-        # Properties
-        properties_element = share_element.find('Properties')
-        share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
-        share.properties.etag = properties_element.findtext('Etag')
-        share.properties.quota = int(properties_element.findtext('Quota'))
-
-        # Add share to list
-        shares.append(share)
-
-    return shares
-
-
-def _convert_xml_to_directories_and_files(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Entries>
-        <File>
-          <Name>file-name</Name>
-          <Properties>
-            <Content-Length>size-in-bytes</Content-Length>
-          </Properties>
-        </File>
-        <Directory>
-          <Name>directory-name</Name>
-        </Directory>
-      </Entries>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    entries = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(entries, 'next_marker', next_marker)
-
-    entries_element = list_element.find('Entries')
-
-    for file_element in entries_element.findall('File'):
-        # Name element
-        file = File()
-        file.name = file_element.findtext('Name')
-
-        # Properties
-        properties_element = file_element.find('Properties')
-        file.properties.content_length = int(properties_element.findtext('Content-Length'))
-
-        # Add file to list
-        entries.append(file)
-
-    for directory_element in entries_element.findall('Directory'):
-        # Name element
-        directory = Directory()
-        directory.name = directory_element.findtext('Name')
-
-        # Add directory to list
-        entries.append(directory)
-
-    return entries
-
-
-def _convert_xml_to_handles(response):
-    """
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults>
-        <Entries>
-            <Handle>
-                <HandleId>21123954401</HandleId>
-                <Path />
-                <FileId>0</FileId>
-                <ParentId>0</ParentId>
-                <SessionId>9385737614310506553</SessionId>
-                <ClientIp>167.220.2.92:27553</ClientIp>
-                <OpenTime>Fri, 03 May 2019 05:59:43 GMT</OpenTime>
-            </Handle>
-            ...
-        </Entries>
-        <NextMarker />
-    </EnumerationResults>'
-    """
-    if response is None or response.body is None:
-        return None
-
-    entries = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(entries, 'next_marker', next_marker)
-
-    handles_list_element = list_element.find('Entries')
-
-    for handle_element in handles_list_element.findall('Handle'):
-        # Handle fields
-        handle = Handle()
-        handle.handle_id = handle_element.findtext('HandleId')
-        handle.path = handle_element.findtext('Path')
-        handle.file_id = handle_element.findtext('FileId')
-        handle.parent_id = handle_element.findtext('ParentId')
-        handle.session_id = handle_element.findtext('SessionId')
-        handle.client_ip = handle_element.findtext('ClientIp')
-        handle.open_time = parser.parse(handle_element.findtext('OpenTime'))
-
-        last_connect_time_string = handle_element.findtext('LastReconnectTime')
-        if last_connect_time_string is not None:
-            handle.last_reconnect_time = parser.parse(last_connect_time_string)
-
-        # Add handle to list
-        entries.append(handle)
-
-    return entries
-
-
-def _parse_close_handle_response(response):
-    if response is None or response.body is None:
-        return 0
-
-    results = _list()
-    results.append(int(response.headers['x-ms-number-of-handles-closed']))
-
-    next_marker = None if 'x-ms-marker' not in response.headers else response.headers['x-ms-marker']
-    setattr(results, 'next_marker', next_marker)
-    return results
-
-
-def _convert_xml_to_ranges(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <Ranges>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-      <Range>
-        <Start>Start Byte</Start>
-        <End>End Byte</End>
-      </Range>
-    </Ranges>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    ranges = list()
-    ranges_element = ETree.fromstring(response.body)
-
-    for range_element in ranges_element.findall('Range'):
-        # Parse range
-        range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
-
-        # Add range to list
-        ranges.append(range)
-
-    return ranges
-
-
-def _convert_xml_to_share_stats(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <ShareStats>
-       <ShareUsageBytes>15</ShareUsageBytes>
-    </ShareStats>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    share_stats_element = ETree.fromstring(response.body)
-    return int(share_stats_element.findtext('ShareUsageBytes'))
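-
-
-# Illustrative sketch (not part of the original module): the deserializers
-# above all follow the same pattern -- parse the XML body with ElementTree
-# and pull out typed fields. A minimal, self-contained run against the
-# ShareStats payload:
-def _example_parse_share_stats():
-    from xml.etree import ElementTree
-    body = '<ShareStats><ShareUsageBytes>15</ShareUsageBytes></ShareStats>'
-    root = ElementTree.fromstring(body)
-    return int(root.findtext('ShareUsageBytes'))  # -> 15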
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_download_chunking.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_download_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_download_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_download_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-
-def _download_file_chunks(file_service, share_name, directory_name, file_name,
-                          download_size, block_size, progress, start_range, end_range,
-                          stream, max_connections, progress_callback, validate_content,
-                          timeout, operation_context, snapshot):
-
-    downloader_class = _ParallelFileChunkDownloader if max_connections > 1 else _SequentialFileChunkDownloader
-
-    downloader = downloader_class(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        download_size,
-        block_size,
-        progress,
-        start_range,
-        end_range,
-        stream,
-        progress_callback,
-        validate_content,
-        timeout,
-        operation_context,
-        snapshot,
-    )
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
-    else:
-        for chunk in downloader.get_chunk_offsets():
-            downloader.process_chunk(chunk)
-
-
-class _FileChunkDownloader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name,
-                 download_size, chunk_size, progress, start_range, end_range,
-                 stream, progress_callback, validate_content, timeout, operation_context, snapshot):
-        # identifiers for the file
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-
-        # information on the download range/chunk size
-        self.chunk_size = chunk_size
-        self.download_size = download_size
-        self.start_index = start_range
-        self.file_end = end_range
-
-        # the destination that we will write to
-        self.stream = stream
-
-        # progress related
-        self.progress_callback = progress_callback
-        self.progress_total = progress
-
-        # parameters for each get file operation
-        self.validate_content = validate_content
-        self.timeout = timeout
-        self.operation_context = operation_context
-        self.snapshot = snapshot
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.file_end:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        if chunk_start + self.chunk_size > self.file_end:
-            chunk_end = self.file_end
-        else:
-            chunk_end = chunk_start + self.chunk_size
-
-        chunk_data = self._download_chunk(chunk_start, chunk_end).content
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    # should be provided by the subclass
-    def _update_progress(self, length):
-        pass
-
-    # should be provided by the subclass
-    def _write_to_stream(self, chunk_data, chunk_start):
-        pass
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        return self.file_service._get_file(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            start_range=chunk_start,
-            end_range=chunk_end - 1,
-            validate_content=self.validate_content,
-            timeout=self.timeout,
-            _context=self.operation_context,
-            snapshot=self.snapshot
-        )
-
-
-class _ParallelFileChunkDownloader(_FileChunkDownloader):
-    def __init__(self, file_service, share_name, directory_name, file_name,
-                 download_size, chunk_size, progress, start_range, end_range,
-                 stream, progress_callback, validate_content, timeout, operation_context, snapshot):
-        super(_ParallelFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
-                                                           download_size, chunk_size, progress, start_range, end_range,
-                                                           stream, progress_callback, validate_content, timeout,
-                                                           operation_context, snapshot)
-
-        # for a parallel download, the stream is always seekable, so we note down the current position
-        # in order to seek to the right place when out-of-order chunks come in
-        self.stream_start = stream.tell()
-
-        # since parallel operations are going on
-        # it is essential to protect the writing and progress reporting operations
-        self.stream_lock = threading.Lock()
-        self.progress_lock = threading.Lock()
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            with self.progress_lock:
-                self.progress_total += length
-                total_so_far = self.progress_total
-            self.progress_callback(total_so_far, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        with self.stream_lock:
-            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-            self.stream.write(chunk_data)
-
-
-class _SequentialFileChunkDownloader(_FileChunkDownloader):
-    def __init__(self, file_service, share_name, directory_name, file_name, download_size, chunk_size, progress,
-                 start_range, end_range, stream, progress_callback, validate_content, timeout, operation_context,
-                 snapshot):
-        super(_SequentialFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
-                                                             download_size, chunk_size, progress, start_range,
-                                                             end_range, stream, progress_callback, validate_content,
-                                                             timeout, operation_context, snapshot)
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            self.progress_total += length
-            self.progress_callback(self.progress_total, self.download_size)
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        # chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
-        self.stream.write(chunk_data)
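-
-
-# Illustrative sketch (not part of the original module): the chunking math
-# used by get_chunk_offsets/process_chunk above, stated standalone. For a
-# download of bytes [0, 10485760) with 4 MiB chunks, the chunk ranges are
-# (0, 4194304), (4194304, 8388608) and (8388608, 10485760).
-def _example_chunk_ranges(start_range, end_range, chunk_size):
-    ranges = []
-    index = start_range
-    while index < end_range:
-        ranges.append((index, min(index + chunk_size, end_range)))
-        index += chunk_size
-    return ranges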
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_serialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,66 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _str
-from ..common._error import (
-    _validate_not_none,
-    _ERROR_START_END_NEEDED_FOR_MD5,
-    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
-)
-
-
-def _get_path(share_name=None, directory_name=None, file_name=None):
-    '''
-    Creates the path to access a file resource.
-
-    share_name:
-        Name of share.
-    directory_name:
-        The path to the directory.
-    file_name:
-        Name of file.
-    '''
-    if share_name and directory_name and file_name:
-        return '/{0}/{1}/{2}'.format(
-            _str(share_name),
-            _str(directory_name),
-            _str(file_name))
-    elif share_name and directory_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(directory_name))
-    elif share_name and file_name:
-        return '/{0}/{1}'.format(
-            _str(share_name),
-            _str(file_name))
-    elif share_name:
-        return '/{0}'.format(_str(share_name))
-    else:
-        return '/'
-
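-# Illustrative examples (derived from the branches above):
-#   _get_path('myshare', 'dir/sub', 'data.txt') -> '/myshare/dir/sub/data.txt'
-#   _get_path('myshare', None, 'data.txt')      -> '/myshare/data.txt'
-#   _get_path('myshare')                        -> '/myshare'
-#   _get_path()                                 -> '/'
-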
-
-def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
-                                       end_range_required=True, check_content_md5=False):
-    # If end range is provided, start range must be provided
-    if start_range_required or end_range is not None:
-        _validate_not_none('start_range', start_range)
-    if end_range_required:
-        _validate_not_none('end_range', end_range)
-
-    # Format based on whether end_range is present
-    request.headers = request.headers or {}
-    if end_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
-
-        request.headers['x-ms-range-get-content-md5'] = 'true'
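-
-# Illustrative examples: start_range=0, end_range=1023 produces
-# 'x-ms-range: bytes=0-1023'; start_range=1024 with no end_range produces
-# 'x-ms-range: bytes=1024-'. With check_content_md5=True the range must be a
-# complete range no larger than 4MB, and 'x-ms-range-get-content-md5: true'
-# is added alongside it.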
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_upload_chunking.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_upload_chunking.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/_upload_chunking.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/_upload_chunking.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,133 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import threading
-
-
-def _upload_file_chunks(file_service, share_name, directory_name, file_name,
-                        file_size, block_size, stream, max_connections,
-                        progress_callback, validate_content, timeout):
-    uploader = _FileChunkUploader(
-        file_service,
-        share_name,
-        directory_name,
-        file_name,
-        file_size,
-        block_size,
-        stream,
-        max_connections > 1,
-        progress_callback,
-        validate_content,
-        timeout
-    )
-
-    if progress_callback is not None:
-        progress_callback(0, file_size)
-
-    if max_connections > 1:
-        import concurrent.futures
-        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
-        range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
-    else:
-        if file_size is not None:
-            range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
-        else:
-            range_ids = uploader.process_all_unknown_size()
-
-    return range_ids
-
-
-class _FileChunkUploader(object):
-    def __init__(self, file_service, share_name, directory_name, file_name,
-                 file_size, chunk_size, stream, parallel, progress_callback,
-                 validate_content, timeout):
-        self.file_service = file_service
-        self.share_name = share_name
-        self.directory_name = directory_name
-        self.file_name = file_name
-        self.file_size = file_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_callback = progress_callback
-        self.progress_total = 0
-        self.progress_lock = threading.Lock() if parallel else None
-        self.validate_content = validate_content
-        self.timeout = timeout
-
-    def get_chunk_offsets(self):
-        index = 0
-        if self.file_size is None:
-            # we don't know the size of the stream, so we have no
-            # choice but to seek
-            while True:
-                data = self._read_from_stream(index, 1)
-                if not data:
-                    break
-                yield index
-                index += self.chunk_size
-        else:
-            while index < self.file_size:
-                yield index
-                index += self.chunk_size
-
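For the known-size path, get_chunk_offsets is a plain stride generator; a
standalone sketch (hypothetical, using a 2.5 MB file and 1 MB chunks):

def chunk_offsets(file_size, chunk_size):
    index = 0
    while index < file_size:
        yield index
        index += chunk_size

assert list(chunk_offsets(2621440, 1048576)) == [0, 1048576, 2097152]
# process_chunk then caps the final read at file_size - offset == 524288 bytes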
-    def process_chunk(self, chunk_offset):
-        size = self.chunk_size
-        if self.file_size is not None:
-            size = min(size, self.file_size - chunk_offset)
-        chunk_data = self._read_from_stream(chunk_offset, size)
-        return self._upload_chunk_with_progress(chunk_offset, chunk_data)
-
-    def process_all_unknown_size(self):
-        assert self.stream_lock is None
-        range_ids = []
-        index = 0
-        while True:
-            data = self._read_from_stream(None, self.chunk_size)
-            if data:
-                # upload at the chunk's true start offset, then advance
-                range_id = self._upload_chunk_with_progress(index, data)
-                index += len(data)
-                range_ids.append(range_id)
-            else:
-                break
-
-        return range_ids
-
-    def _read_from_stream(self, offset, count):
-        if self.stream_lock is not None:
-            with self.stream_lock:
-                self.stream.seek(self.stream_start + offset)
-                data = self.stream.read(count)
-        else:
-            data = self.stream.read(count)
-        return data
-
-    def _update_progress(self, length):
-        if self.progress_callback is not None:
-            if self.progress_lock is not None:
-                with self.progress_lock:
-                    self.progress_total += length
-                    total = self.progress_total
-            else:
-                self.progress_total += length
-                total = self.progress_total
-            self.progress_callback(total, self.file_size)
-
-    def _upload_chunk_with_progress(self, chunk_start, chunk_data):
-        chunk_end = chunk_start + len(chunk_data) - 1
-        self.file_service.update_range(
-            self.share_name,
-            self.directory_name,
-            self.file_name,
-            chunk_data,
-            chunk_start,
-            chunk_end,
-            self.validate_content,
-            timeout=self.timeout
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end)
-        self._update_progress(len(chunk_data))
-        return range_id
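Worked example of the range id produced per chunk (values hypothetical): the
id is simply the inclusive byte span that update_range wrote.

chunk_start = 0
chunk_len = 4 * 1024 * 1024                       # one full 4 MB chunk
chunk_end = chunk_start + chunk_len - 1           # inclusive end offset
assert 'bytes={0}-{1}'.format(chunk_start, chunk_end) == 'bytes=0-4194303'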
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/fileservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/fileservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/fileservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/fileservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2698 +0,0 @@
-# coding: utf-8
-
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-import math
-from os import path
-
-from azure.common import AzureHttpError
-
-from ..common._auth import (
-    _StorageSharedKeyAuthentication,
-    _StorageSASAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-    _get_content_md5,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-    DEV_ACCOUNT_NAME,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _parse_metadata,
-    _parse_properties,
-    _parse_length_from_content_range,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _validate_type_bytes,
-    _ERROR_VALUE_NEGATIVE,
-    _ERROR_STORAGE_MISSING_INFO,
-    _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES,
-    _ERROR_PARALLEL_NOT_SEEKABLE,
-    _validate_access_policies,
-)
-from ..common._http import HTTPRequest
-from ..common._serialization import (
-    _get_request_body,
-    _get_data_bytes_only,
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    FileSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_shares,
-    _convert_xml_to_directories_and_files,
-    _convert_xml_to_handles,
-    _parse_close_handle_response,
-    _convert_xml_to_ranges,
-    _convert_xml_to_share_stats,
-    _parse_file,
-    _parse_share,
-    _parse_snapshot_share,
-    _parse_directory,
-)
-from ._download_chunking import _download_file_chunks
-from ._serialization import (
-    _get_path,
-    _validate_and_format_range_headers,
-)
-from ._upload_chunking import _upload_file_chunks
-from .models import (
-    FileProperties,
-)
-
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-_SHARE_NOT_FOUND_ERROR_CODE = 'ShareNotFound'
-_PARENT_NOT_FOUND_ERROR_CODE = 'ParentNotFound'
-_RESOURCE_NOT_FOUND_ERROR_CODE = 'ResourceNotFound'
-_RESOURCE_ALREADY_EXISTS_ERROR_CODE = 'ResourceAlreadyExists'
-_SHARE_ALREADY_EXISTS_ERROR_CODE = 'ShareAlreadyExists'
-
-_GB = 1024 * 1024 * 1024
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    from cStringIO import StringIO as BytesIO
-
-
-class FileService(StorageClient):
-    '''
-    The Server Message Block (SMB) protocol is the preferred file share protocol
-    used on-premises today. The Microsoft Azure File service enables customers to
-    leverage the availability and scalability of Azure's Cloud Infrastructure as
-    a Service (IaaS) SMB without having to rewrite SMB client applications.
-
-    The Azure File service also offers a compelling alternative to traditional
-    Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which
-    are often complex and expensive to install, configure, and operate.
-
-    :ivar int MAX_SINGLE_GET_SIZE:
-        The size of the first range get performed by get_file_to_* methods if
-        max_connections is greater than 1. Less data will be returned if the
-        file is smaller than this.
-    :ivar int MAX_CHUNK_GET_SIZE:
-        The size of subsequent range gets performed by get_file_to_* methods if
-        max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE.
-        Less data will be returned if the remainder of the file is smaller than
-        this. If this is set to larger than 4MB, validate_content will throw an
-        error if enabled. However, if validate_content is not desired, a size
-        greater than 4MB may be optimal. Setting this below 4MB is not recommended.
-    :ivar int MAX_RANGE_SIZE:
-        The size of the ranges put by create_file_from_* methods. Smaller ranges
-        may be put if there is less data provided. The maximum range size the service
-        supports is 4MB.
-    '''
-    MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
-    MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024
-    MAX_RANGE_SIZE = 4 * 1024 * 1024
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
-                 request_session=None, connection_string=None, socket_timeout=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests
-            signed with an account key and to construct the storage endpoint. It
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication.
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests
-             instead of the account key. If account key and sas token are both
-             specified, account key will be used to sign.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults
-            to Azure (core.windows.net). Override this to use the China cloud
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'file',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(FileService, self).__init__(service_params)
-
-        if self.account_name == DEV_ACCOUNT_NAME:
-            raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def make_file_url(self, share_name, directory_name, file_name,
-                      protocol=None, sas_token=None):
-        '''
-        Creates the url to access a file.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file.
-        :param str protocol:
-            Protocol to use: 'http' or 'https'. If not specified, uses the
-            protocol specified when FileService was initialized.
-        :param str sas_token:
-            Shared access signature token created with
-            generate_shared_access_signature.
-        :return: file access URL.
-        :rtype: str
-        '''
-
-        if directory_name is None:
-            url = '{}://{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                file_name,
-            )
-        else:
-            url = '{}://{}/{}/{}/{}'.format(
-                protocol or self.protocol,
-                self.primary_endpoint,
-                share_name,
-                directory_name,
-                file_name,
-            )
-
-        if sas_token:
-            url += '?' + sas_token
-
-        return url
-
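make_file_url is pure string assembly; a minimal standalone equivalent
(hypothetical endpoint and names) shows both URL shapes:

def make_url(primary_endpoint, share_name, file_name,
             directory_name=None, protocol='https', sas_token=None):
    parts = [share_name, file_name] if directory_name is None \
        else [share_name, directory_name, file_name]
    url = '{}://{}/{}'.format(protocol, primary_endpoint, '/'.join(parts))
    return url + '?' + sas_token if sas_token else url

assert make_url('myaccount.file.core.windows.net', 'myshare', 'f.txt', 'dir') == \
    'https://myaccount.file.core.windows.net/myshare/dir/f.txt'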
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the file service.
-        Use the returned signature with the sas_token parameter of the FileService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.FILE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
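A minimal usage sketch with placeholder credentials; ResourceTypes and
AccountPermissions are the models re-exported by this package's common layer.

from datetime import datetime, timedelta

svc = FileService(account_name='myaccount', account_key='<account-key>')
token = svc.generate_account_shared_access_signature(
    ResourceTypes(object=True),
    AccountPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)
read_only = FileService(account_name='myaccount', sas_token=token)  # key-less client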
-    def generate_share_shared_access_signature(self, share_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None,
-                                               protocol=None,
-                                               cache_control=None,
-                                               content_disposition=None,
-                                               content_encoding=None,
-                                               content_language=None,
-                                               content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_share(
-            share_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
-    def generate_file_shared_access_signature(self, share_name,
-                                              directory_name=None,
-                                              file_name=None,
-                                              permission=None,
-                                              expiry=None,
-                                              start=None,
-                                              id=None,
-                                              ip=None,
-                                              protocol=None,
-                                              cache_control=None,
-                                              content_disposition=None,
-                                              content_encoding=None,
-                                              content_language=None,
-                                              content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            :func:`~set_share_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS token.
-            Possible values are
-            both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
-            is https,http. Note that HTTP only is not a permitted value.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = FileSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_file(
-            share_name,
-            directory_name,
-            file_name,
-            permission,
-            expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-            cache_control=cache_control,
-            content_disposition=content_disposition,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_type=content_type,
-        )
-
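A sketch of the typical file-level flow, reusing the svc client from the
earlier sketch (names are placeholders; FilePermissions is this package's
file permission model): sign one file, then carry the token in its URL.

token = svc.generate_file_shared_access_signature(
    'myshare', 'mydir', 'myfile.txt',
    permission=FilePermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)
url = svc.make_file_url('myshare', 'mydir', 'myfile.txt', sas_token=token)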
-    def set_file_service_properties(self, hour_metrics=None, minute_metrics=None,
-                                    cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's File service, including
-        Azure Storage Analytics. If an element (e.g. HourMetrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for files.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for files.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors))
-
-        self._perform_request(request)
-
-    def get_file_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's File service, including
-        Azure Storage Analytics.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The file service properties.
-        :rtype:
-            :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def list_shares(self, prefix=None, marker=None, num_results=None,
-                    include_metadata=False, timeout=None, include_snapshots=False):
-        '''
-        Returns a generator to list the shares under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned or num_results 
-        is reached.
-
-        If num_results is specified and the account has more than that number of 
-        shares, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param int num_results:
-            Specifies the maximum number of shares to return.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param bool include_snapshots:
-            Specifies that share snapshots be returned in the response.
-        '''
-        include = 'snapshots' if include_snapshots else None
-        if include_metadata:
-            if include is not None:
-                include = include + ',metadata'
-            else:
-                include = 'metadata'
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
-                  'include': include, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_shares(**kwargs)
-
-        return ListGenerator(resp, self._list_shares, (), kwargs)
-
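A hypothetical paging loop over the generator above: iterate a capped page,
then resume from next_marker if the service had more shares to return.

page = svc.list_shares(num_results=5, include_metadata=True)
names = [share.name for share in page]
if page.next_marker:                      # populated once num_results was hit
    rest = svc.list_shares(marker=page.next_marker)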
-    def _list_shares(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of the shares under the specified account.
-
-        :param str prefix:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of shares to return. A single list
-            request may return up to 1000 shares and potentially a continuation
-            token which should be followed to get additional results.
-        :param string include:
-            Include this parameter to specify that the share's metadata,
-            its snapshots, or both be returned as part of the response body.
-            Set this parameter to 'metadata' for the share's metadata, to
-            'snapshots' for all share snapshots, or to 'snapshots,metadata' for both.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_shares, operation_context=_context)
-
-    def create_share(self, share_name, metadata=None, quota=None,
-                     fail_on_exist=False, timeout=None):
-        '''
-        Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of share to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5TB (5120).
-        :param bool fail_on_exist:
-            Specify whether to throw an exception when the share exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if share is created, False if share already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request, expected_errors=[_SHARE_ALREADY_EXISTS_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
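Sketch of the fail_on_exist semantics with a placeholder share; only the
third call surfaces the service error.

svc.create_share('logs', quota=10)            # True: newly created
svc.create_share('logs')                      # False: exists, error swallowed
svc.create_share('logs', fail_on_exist=True)  # raises AzureHttpError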
-    def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None):
-        '''
-        Creates a snapshot of an existing share under the specified account.
-
-        :param str share_name:
-            The name of the share to create a snapshot of.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be
-            greater than 0, and less than or equal to 5TB (5120).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: snapshot properties
-        :rtype: azure.storage.file.models.Share
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'snapshot',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_snapshot_share, [share_name])
-
-    def get_share_properties(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A Share that exposes properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.Share`
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_share, [share_name])
-
-    def set_share_properties(self, share_name, quota, timeout=None):
-        '''
-        Sets service-defined properties for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes. Must be 
-            greater than 0, and less than or equal to 5 TB (5120 GB).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('quota', quota)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-share-quota': _int_to_str(quota)
-        }
-
-        self._perform_request(request)
-
-    def get_share_metadata(self, share_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the share metadata as name-value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_share_metadata(self, share_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        share. Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param metadata:
-            A dict containing name-value pairs to associate with the share as 
-            metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
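Because each call replaces the full metadata set, updates must resend every
pair; a hypothetical round trip:

svc.set_share_metadata('myshare', {'category': 'test'})
assert svc.get_share_metadata('myshare') == {'category': 'test'}
svc.set_share_metadata('myshare')   # omitting the dict clears all metadata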
-    def get_share_acl(self, share_name, timeout=None):
-        '''
-        Gets the permissions for the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: A dictionary of access policies associated with the share.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_share_acl(self, share_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets the permissions for the specified share or stored access 
-        policies that may be used with Shared Access Signatures.
-
-        :param str share_name:
-            Name of existing share.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-
-        self._perform_request(request)
-
-    def get_share_stats(self, share_name, timeout=None):
-        '''
-        Gets the approximate size of the data stored on the share,
-        rounded up to the nearest gigabyte.
-        
-        Note that this value may not include all recently created
-        or recently re-sized files.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the approximate size of the data stored on the share.
-        :rtype: int
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        usage = self._perform_request(request, _convert_xml_to_share_stats)
-        return int(math.ceil(float(usage) / _GB))
-
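The expression above always rounds up to the next whole gigabyte, so one
byte past 2 GB already reports as 3 GB:

import math

_GB = 1024 * 1024 * 1024
usage = 2 * _GB + 1
assert int(math.ceil(float(usage) / _GB)) == 3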
-    def get_share_stats_in_bytes(self, share_name, timeout=None):
-        """
-        Gets the approximate size of the data stored on the share in bytes.
-
-        Note that this value may not include all recently created
-        or recently re-sized files.
-
-        :param str share_name:
-            Name of existing share.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the approximate size of the data stored on the share.
-        :rtype: int
-        """
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.query = {
-            'restype': 'share',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_share_stats)
-
-    def delete_share(self, share_name, fail_not_exist=False, timeout=None, snapshot=None, delete_snapshots=None):
-        '''
-        Marks the specified share for deletion. If the share
-        does not exist, the operation fails on the service. By 
-        default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of share to delete.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the share doesn't
-            exist. False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-            Specify this argument to delete a specific snapshot only.
-            delete_snapshots must be None if this is specified.
-        :param ~azure.storage.file.models.DeleteSnapshot delete_snapshots:
-            To delete a share that has snapshots, this must be specified as DeleteSnapshot.Include.
-        :return: True if share is deleted, False if the share doesn't exist.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name)
-        request.headers = {
-            'x-ms-delete-snapshots': _to_str(delete_snapshots)
-        }
-        request.query = {
-            'restype': 'share',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request, expected_errors=[_SHARE_NOT_FOUND_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def create_directory(self, share_name, directory_name, metadata=None,
-                         fail_on_exist=False, timeout=None):
-        '''
-        Creates a new directory under the specified share or parent directory. 
-        If the directory with the same name already exists, the operation fails
-        on the service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_on_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to create, including the path to the parent 
-            directory.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            directory as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            specify whether to throw an exception when the directory exists.
-            False by default.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is created, False if directory already exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        if not fail_on_exist:
-            try:
-                self._perform_request(request, expected_errors=[_RESOURCE_ALREADY_EXISTS_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def delete_directory(self, share_name, directory_name,
-                         fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified empty directory. Note that the directory must
-        be empty before it can be deleted. Attempting to delete directories 
-        that are not empty will fail.
-
-        If the directory does not exist, the operation fails on the
-        service. By default, the exception is swallowed by the client.
-        To expose the exception, specify True for fail_not_exist.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            Name of directory to delete, including the path to the parent 
-            directory.
-        :param bool fail_not_exist:
-            Specify whether to throw an exception when the directory doesn't
-            exist.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: True if directory is deleted, False otherwise.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-        }
-
-        if not fail_not_exist:
-            try:
-                self._perform_request(request, expected_errors=[_RESOURCE_NOT_FOUND_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_directory_properties(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-           The path to an existing directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: properties for the specified directory within a directory object.
-        :rtype: :class:`~azure.storage.file.models.Directory`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_directory, [directory_name])
-
-    def get_directory_metadata(self, share_name, directory_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified directory.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the directory metadata as name-value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None):
-        '''
-        Sets one or more user-defined name-value pairs for the specified
-        directory. Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with no metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param metadata:
-            A dict containing name-value pairs to associate with the directory
-            as metadata. Example: {'category':'test'}
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('directory_name', directory_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def list_directories_and_files(self, share_name, directory_name=None,
-                                   num_results=None, marker=None, timeout=None,
-                                   prefix=None, snapshot=None):
-
-        '''
-        Returns a generator to list the directories and files under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all directories and files have been returned or
-        num_results is reached.
-
-        If num_results is specified and the share has more than that number of 
-        files and directories, the generator will have a populated next_marker 
-        field once it finishes. This marker can be used to create a new generator 
-        if more results are desired.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param int num_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            num_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting num_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        operation_context = _OperationContext(location_lock=True)
-        args = (share_name, directory_name)
-        kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout,
-                  '_context': operation_context, 'prefix': prefix, 'snapshot': snapshot}
-
-        resp = self._list_directories_and_files(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_directories_and_files, args, kwargs)
-
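A hypothetical traversal using the generator; entries are the Directory and
File models from this package's file.models.

for entry in svc.list_directories_and_files('myshare', 'mydir', prefix='report'):
    print(entry.name)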
-    def _list_directories_and_files(self, share_name, directory_name=None,
-                                    marker=None, max_results=None, timeout=None,
-                                    prefix=None, _context=None, snapshot=None):
-        '''
-        Returns a list of the directories and files under the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str marker:
-            A string value that identifies the portion of the list
-            to be returned with the next list operation. The operation returns
-            a next_marker value within the response body if the list returned was
-            not complete. The marker value may then be used in a subsequent
-            call to request the next set of list items. The marker value is
-            opaque to the client.
-        :param int max_results:
-            Specifies the maximum number of files to return,
-            including all directory elements. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str prefix:
-            List only the files and/or directories with the given prefix.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        '''
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name)
-        request.query = {
-            'restype': 'directory',
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-
-        return self._perform_request(request, _convert_xml_to_directories_and_files,
-                                     operation_context=_context)
-
-    def list_handles(self, share_name, directory_name=None, file_name=None, recursive=None,
-                     max_results=None, marker=None, snapshot=None, timeout=None):
-        """
-        Returns a generator to list opened handles on a directory or a file under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all handles have been returned or
-        max_results is reached.
-
-        If max_results is specified and there are more than that number of
-        open handles, the generator will have a populated next_marker
-        field once it finishes. This marker can be used to create a new generator
-        if more results are desired.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bool recursive:
-            Boolean that specifies whether the operation should apply to the directory
-            specified in the URI, its files, its subdirectories, and their files.
-        :param int max_results:
-            Specifies the maximum number of handles taken on files and/or directories to return.
-            If the request does not specify max_results or specifies a value greater than 5,000,
-            the server will return up to 5,000 items.
-            Setting max_results to a value less than or equal to zero results in error response code 400 (Bad Request).
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object if max_results was
-            specified and that generator has finished enumerating results. If
-            specified, this generator will begin returning results from the point
-            where the previous generator stopped.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        operation_context = _OperationContext(location_lock=True)
-        args = (share_name, directory_name, file_name)
-        kwargs = {'marker': marker, 'max_results': max_results, 'timeout': timeout, 'recursive': recursive,
-                  '_context': operation_context, 'snapshot': snapshot}
-
-        resp = self._list_handles(*args, **kwargs)
-
-        return ListGenerator(resp, self._list_handles, args, kwargs)
-
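# Editorial sketch (placeholder names; the handle_id attribute is an
# assumption based on the track1 Handle model): walk every open handle under
# a directory, recursing into subdirectories.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
for handle in svc.list_handles('myshare', directory_name='mydir',
                               recursive=True, max_results=1000):
    print(handle.handle_id)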
-    def _list_handles(self, share_name, directory_name=None, file_name=None, recursive=None,
-                      marker=None, max_results=None, timeout=None, _context=None, snapshot=None):
-        """
-        Returns a list of the open handles on a directory or a file under the specified share.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bool recursive:
-            Boolean that specifies whether the operation should apply to the directory
-            specified in the URI, its files, its subdirectories, and their files.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object if max_results was
-            specified and that generator has finished enumerating results. If
-            specified, this generator will begin returning results from the point
-            where the previous generator stopped.
-        :param int max_results:
-            Specifies the maximum number of handles taken on files and/or
-            directories to return. If the request does not specify
-            max_results or specifies a value greater than 5,000, the server will
-            return up to 5,000 items. Setting max_results to a value less than
-            or equal to zero results in error response code 400 (Bad Request).
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        """
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'listhandles',
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-        request.headers = {
-            'x-ms-recursive': _to_str(recursive)
-        }
-
-        return self._perform_request(request, _convert_xml_to_handles,
-                                     operation_context=_context)
-
-    def close_handles(self, share_name, directory_name=None, file_name=None, recursive=None,
-                      handle_id=None, marker=None, snapshot=None, timeout=None):
-        """
-        Returns a generator to close opened handles on a directory or a file under the specified share.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all handles have been closed.
-        The yielded values represent the number of handles that were closed in each transaction.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bool recursive:
-            Boolean that specifies whether the operation should apply to the directory
-            specified in the URI, its files, its subdirectories, and their files.
-        :param str handle_id:
-            Required. Specifies the handle ID opened on the file or directory to be closed.
-            Asterisk ('*') is a wildcard that specifies all handles.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object if it has not finished closing handles. If
-            specified, this generator will begin closing handles from the point
-            where the previous generator stopped.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        operation_context = _OperationContext(location_lock=True)
-        args = (share_name, directory_name, file_name)
-        kwargs = {'marker': marker, 'handle_id': handle_id, 'timeout': timeout, 'recursive': recursive,
-                  '_context': operation_context, 'snapshot': snapshot}
-
-        resp = self._close_handles(*args, **kwargs)
-
-        return ListGenerator(resp, self._close_handles, args, kwargs)
-
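# Editorial sketch with placeholder names: the generator above yields the
# number of handles closed per transaction, so the grand total is a sum.
# '*' closes every handle, as the docstring notes.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
closed = sum(svc.close_handles('myshare', directory_name='mydir',
                               recursive=True, handle_id='*'))
print('closed %d handles' % closed)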
-    def _close_handles(self, share_name, directory_name=None, file_name=None, recursive=None, handle_id=None,
-                       marker=None, timeout=None, _context=None, snapshot=None):
-        """
-        Returns the number of handles that were closed.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bool recursive:
-            Boolean that specifies whether the operation should apply to the directory
-            specified in the URI, its files, its subdirectories, and their files.
-        :param str handle_id:
-            Required. Specifies the handle ID opened on the file or directory to be closed.
-            Asterisk ('*') is a wildcard that specifies all handles.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object if it has not
-            finished closing handles. If specified, this generator will begin
-            closing handles from the point where the previous generator stopped.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        """
-        _validate_not_none('share_name', share_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'forceclosehandles',
-            'marker': _to_str(marker),
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot)
-        }
-        request.headers = {
-            'x-ms-recursive': _to_str(recursive),
-            'x-ms-handle-id': _to_str(handle_id),
-        }
-
-        return self._perform_request(request, _parse_close_handle_response, operation_context=_context)
-
-    def get_file_properties(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. Returns an instance of :class:`~azure.storage.file.models.File` with
-        :class:`~azure.storage.file.models.FileProperties` and a metadata dict.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: a file object including properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'HEAD'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-
-        return self._perform_request(request, _parse_file, [file_name])
-
-    def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
-        '''
-        Returns a boolean indicating whether the share exists if only share name is
-        given. If directory_name is specified, a boolean will be returned indicating
-        if the directory exists. If file_name is specified as well, a boolean will be
-        returned indicating if the file exists.
-
-        :param str share_name:
-            Name of a share.
-        :param str directory_name:
-            The path to a directory.
-        :param str file_name:
-            Name of a file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A boolean indicating whether the resource exists.
-        :rtype: bool
-        '''
-        _validate_not_none('share_name', share_name)
-        try:
-            request = HTTPRequest()
-            request.method = 'HEAD' if file_name is not None else 'GET'
-            request.host_locations = self._get_host_locations()
-            request.path = _get_path(share_name, directory_name, file_name)
-
-            if file_name is not None:
-                restype = None
-                expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _PARENT_NOT_FOUND_ERROR_CODE]
-            elif directory_name is not None:
-                restype = 'directory'
-                expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _SHARE_NOT_FOUND_ERROR_CODE,
-                                   _PARENT_NOT_FOUND_ERROR_CODE]
-            else:
-                restype = 'share'
-                expected_errors = [_SHARE_NOT_FOUND_ERROR_CODE]
-
-            request.query = {
-                'restype': restype,
-                'timeout': _int_to_str(timeout),
-                'sharesnapshot': _to_str(snapshot)
-            }
-            self._perform_request(request, expected_errors=expected_errors)
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
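# Editorial sketch with placeholder names: exists() probes the share, the
# directory, or the file depending on which arguments are supplied, and
# swallows only the expected "not found" error codes.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
print(svc.exists('myshare'))                         # share level
print(svc.exists('myshare', 'mydir'))                # directory level
print(svc.exists('myshare', 'mydir', 'myfile.txt'))  # file level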
-    def resize_file(self, share_name, directory_name,
-                    file_name, content_length, timeout=None):
-        '''
-        Resizes a file to the specified size. If the specified byte
-        value is less than the current size of the file, then all
-        ranges above the specified byte value are cleared.
-        
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int content_length:
-            The length to resize the file to.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length)
-        }
-
-        self._perform_request(request)
-
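# Editorial sketch with placeholder names: shrinking a file clears every
# range past the new length, per the docstring above.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
svc.resize_file('myshare', 'mydir', 'big.bin', content_length=1024)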
-    def set_file_properties(self, share_name, directory_name, file_name,
-                            content_settings, timeout=None):
-        '''
-        Sets system properties on the file. If one property is set in
-        content_settings, all properties will be overridden.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set the file properties.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_settings', content_settings)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = content_settings._to_headers()
-
-        self._perform_request(request)
-
-    def get_file_metadata(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
-        '''
-        Returns all user-defined metadata for the specified file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return:
-            A dictionary representing the file metadata as name-value pairs.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-
-        return self._perform_request(request, _parse_metadata)
-
-    def set_file_metadata(self, share_name, directory_name,
-                          file_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the file. To remove all
-            metadata from the file, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
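# Editorial sketch with placeholder names: a metadata round-trip. Each
# set_file_metadata call replaces the whole metadata set, so re-send any
# existing pairs you want to keep.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
svc.set_file_metadata('myshare', 'mydir', 'myfile.txt',
                      metadata={'category': 'test', 'owner': 'alice'})
print(svc.get_file_metadata('myshare', 'mydir', 'myfile.txt'))
# -> {'category': 'test', 'owner': 'alice'}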
-    def copy_file(self, share_name, directory_name, file_name, copy_source,
-                  metadata=None, timeout=None):
-        '''
-        Copies a file asynchronously. This operation returns a copy operation 
-        properties object, including a copy ID you can use to check or abort the 
-        copy operation. The File service copies files on a best-effort basis.
-
-        If the destination file exists, it will be overwritten. The destination 
-        file cannot be modified while the copy operation is in progress.
-
-        :param str share_name:
-            Name of the destination share. The share must exist.
-        :param str directory_name:
-            Name of the destination directory. The directory must exist.
-        :param str file_name:
-            Name of the destination file. If the destination file exists, it will 
-            be overwritten. Otherwise, it will be created.
-        :param str copy_source:
-            A URL of up to 2 KB in length that specifies an Azure file or blob. 
-            The value should be URL-encoded as it would appear in a request URI. 
-            If the source is in another account, the source must either be public 
-            or must be authenticated via a shared access signature. If the source 
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.file.core.windows.net/myshare/mydir/myfile
-            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
-        :param metadata:
-            Name-value pairs associated with the file as metadata. If no name-value 
-            pairs are specified, the operation will copy the metadata from the 
-            source blob or file to the destination file. If one or more name-value 
-            pairs are specified, the destination file is created with the specified 
-            metadata, and the metadata is not copied from the source blob or file. 
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Copy operation properties such as status, source, and ID.
-        :rtype: :class:`~azure.storage.file.models.CopyProperties`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_source', copy_source)
-
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-copy-source': _to_str(copy_source),
-        }
-        _add_metadata_headers(metadata, request)
-
-        return self._perform_request(request, _parse_properties, [FileProperties]).copy
-
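# Editorial sketch with placeholder names and source URL: start a server-side
# copy, poll its status through the destination file's properties, and abort
# via the returned copy ID if needed.
import time
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
copy = svc.copy_file(
    'myshare', 'mydir', 'copied.txt',
    'https://otheraccount.file.core.windows.net/srcshare/dir/src.txt?sastoken')
while True:
    props = svc.get_file_properties('myshare', 'mydir', 'copied.txt').properties
    if props.copy.status != 'pending':
        break
    time.sleep(2)
# svc.abort_copy_file('myshare', 'mydir', 'copied.txt', copy.id)  # to cancel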
-    def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
-        '''
-        Aborts a pending copy_file operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param str share_name:
-            Name of destination share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of destination file.
-        :param str copy_id:
-            Copy identifier provided in the copy.id of the original
-            copy_file operation.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('copy_id', copy_id)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'copy',
-            'copyid': _to_str(copy_id),
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-copy-action': 'abort',
-        }
-
-        self._perform_request(request)
-
-    def delete_file(self, share_name, directory_name, file_name, timeout=None):
-        '''
-        Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-
-        self._perform_request(request)
-
-    def create_file(self, share_name, directory_name, file_name,
-                    content_length, content_settings=None, metadata=None,
-                    timeout=None):
-        '''
-        Creates a new file.
-
-        See create_file_from_* for high level functions that handle the
-        creation and upload of large files with automatic chunking and
-        progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param int content_length:
-            Length of the file in bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('content_length', content_length)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        request.headers = {
-            'x-ms-content-length': _to_str(content_length),
-            'x-ms-type': 'file'
-        }
-        _add_metadata_headers(metadata, request)
-        if content_settings is not None:
-            request.headers.update(content_settings._to_headers())
-
-        self._perform_request(request)
-
-    def create_file_from_path(self, share_name, directory_name, file_name,
-                              local_file_path, content_settings=None,
-                              metadata=None, validate_content=False, progress_callback=None,
-                              max_connections=2, timeout=None):
-        '''
-        Creates a new Azure file from a local file path, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str local_file_path:
-            Path of the local file to upload as the file content.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used for setting file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('local_file_path', local_file_path)
-
-        count = path.getsize(local_file_path)
-        with open(local_file_path, 'rb') as stream:
-            self.create_file_from_stream(
-                share_name, directory_name, file_name, stream,
-                count, content_settings, metadata, validate_content, progress_callback,
-                max_connections, timeout)
-
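# Editorial sketch with placeholder names and path: the path variant computes
# the file size itself and streams the local file with chunked, parallel
# uploads.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
svc.create_file_from_path('myshare', 'mydir', 'report.pdf',
                          '/tmp/report.pdf', max_connections=4)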
-    def create_file_from_text(self, share_name, directory_name, file_name,
-                              text, encoding='utf-8', content_settings=None,
-                              metadata=None, validate_content=False, timeout=None):
-        '''
-        Creates a new file from str/unicode, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param str text:
-            Text to upload to the file.
-        :param str encoding:
-            Python encoding to use to convert the text to bytes.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('text', text)
-
-        if not isinstance(text, bytes):
-            _validate_not_none('encoding', encoding)
-            text = text.encode(encoding)
-
-        self.create_file_from_bytes(
-            share_name, directory_name, file_name, text, count=len(text),
-            content_settings=content_settings, metadata=metadata,
-            validate_content=validate_content, timeout=timeout)
-
-    def create_file_from_bytes(
-            self, share_name, directory_name, file_name, file,
-            index=0, count=None, content_settings=None, metadata=None,
-            validate_content=False, progress_callback=None, max_connections=2,
-            timeout=None):
-        '''
-        Creates a new file from an array of bytes, or updates the content
-        of an existing file, with automatic chunking and progress
-        notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param bytes file:
-            Content of file as an array of bytes.
-        :param int index:
-            Start index in the array of bytes.
-        :param int count:
-            Number of bytes to upload. Set to None or negative value to upload
-            all bytes starting from index.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file', file)
-        _validate_type_bytes('file', file)
-
-        if index < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
-
-        if count is None or count < 0:
-            count = len(file) - index
-
-        stream = BytesIO(file)
-        stream.seek(index)
-
-        self.create_file_from_stream(
-            share_name, directory_name, file_name, stream, count,
-            content_settings, metadata, validate_content, progress_callback,
-            max_connections, timeout)
-
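# Editorial sketch with placeholder names: upload a slice of an in-memory
# buffer. index/count select the region (here, everything after the 9-byte
# header), and validate_content adds per-range MD5 checks.
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
payload = b'header...' + b'x' * 1024
svc.create_file_from_bytes('myshare', 'mydir', 'body.bin', payload,
                           index=9, count=1024, validate_content=True)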
-    def create_file_from_stream(
-            self, share_name, directory_name, file_name, stream, count,
-            content_settings=None, metadata=None, validate_content=False,
-            progress_callback=None, max_connections=2, timeout=None):
-        '''
-        Creates a new file from a file/stream, or updates the content of an
-        existing file, with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of file to create or update.
-        :param io.IOBase stream:
-            Opened file/stream to upload as the file content.
-        :param int count:
-            Number of bytes to read from the stream. This is required; a
-            file cannot be created if the count is unknown.
-        :param ~azure.storage.file.models.ContentSettings content_settings:
-            ContentSettings object used to set file properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :param bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage 
-            service checks the hash of the content that has arrived with the hash 
-            that was sent. This is primarily valuable for detecting bitflips on 
-            the wire if using http instead of https as https (the default) will 
-            already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) where
-            current is the number of bytes transferred so far and total is the
-            size of the file, or None if the total size is unknown.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            Maximum number of parallel connections to use. Note that parallel upload 
-            requires the stream to be seekable.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-        _validate_not_none('count', count)
-
-        if count < 0:
-            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
-
-        self.create_file(
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            content_settings,
-            metadata,
-            timeout
-        )
-
-        _upload_file_chunks(
-            self,
-            share_name,
-            directory_name,
-            file_name,
-            count,
-            self.MAX_RANGE_SIZE,
-            stream,
-            max_connections,
-            progress_callback,
-            validate_content,
-            timeout
-        )
-
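# Editorial sketch with placeholder names: stream upload with a progress
# callback. count is mandatory because the file length must be declared up
# front, and the stream must be seekable for max_connections > 1.
import io
from azure.storage.file import FileService

def report(current, total):
    print('%d/%s bytes sent' % (current, total))

svc = FileService(account_name='myaccount', account_key='<key>')
data = io.BytesIO(b'x' * (8 * 1024 * 1024))
svc.create_file_from_stream('myshare', 'mydir', 'big.bin', data,
                            count=8 * 1024 * 1024,
                            progress_callback=report, max_connections=4)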
-    def _get_file(self, share_name, directory_name, file_name,
-                  start_range=None, end_range=None, validate_content=False,
-                  timeout=None, _context=None, snapshot=None):
-        '''
-        Downloads a file's content, metadata, and properties. You can specify a
-        range if you don't need to download the file in its entirety. If no range
-        is specified, the full file will be downloaded.
-
-        See get_file_to_* for high level functions that handle the download
-        of large files with automatic chunking and progress notifications.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            When this is set to True and specified together with the Range header, 
-            the service returns the MD5 hash for the range, as long as the range 
-            is less than or equal to 4 MB in size.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with content, properties, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-        _validate_and_format_range_headers(
-            request,
-            start_range,
-            end_range,
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=validate_content)
-
-        return self._perform_request(request, _parse_file,
-                                     [file_name, validate_content],
-                                     operation_context=_context)
-
-    def get_file_to_path(self, share_name, directory_name, file_name, file_path,
-                         open_mode='wb', start_range=None, end_range=None,
-                         validate_content=False, progress_callback=None,
-                         max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a file path, with automatic chunking and progress
-        notifications. Returns an instance of File with properties and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str file_path:
-            Path of file to write to.
-        :param str open_mode:
-            Mode to use when opening the file. Note that specifying an append-only
-            open_mode prevents parallel download, so max_connections must be set
-            to 1 if this open_mode is used.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data parallel using the number of threads equal to 
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('file_path', file_path)
-        _validate_not_none('open_mode', open_mode)
-
-        if max_connections > 1 and 'a' in open_mode:
-            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        with open(file_path, open_mode) as stream:
-            file = self.get_file_to_stream(
-                share_name, directory_name, file_name, stream,
-                start_range, end_range, validate_content,
-                progress_callback, max_connections, timeout, snapshot)
-
-        return file
-
-    def get_file_to_stream(
-            self, share_name, directory_name, file_name, stream,
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file to a stream, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties
-        and metadata.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param io.IOBase stream:
-            Opened file/stream to write to.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download first 512 bytes of file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data parallel using the number of threads equal to 
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('stream', stream)
-
-        if end_range is not None:
-            _validate_not_none("start_range", start_range)
-
-        # the stream must be seekable if parallel download is required
-        if max_connections > 1:
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-            else:
-                try:
-                    stream.seek(stream.tell())
-                except (NotImplementedError, AttributeError):
-                    raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
-        first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
-
-        initial_request_start = start_range if start_range is not None else 0
-
-        if end_range is not None and end_range - start_range < first_get_size:
-            initial_request_end = end_range
-        else:
-            initial_request_end = initial_request_start + first_get_size - 1
-
-        # Send a context object to make sure we always retry to the initial location
-        operation_context = _OperationContext(location_lock=True)
-        try:
-            file = self._get_file(share_name,
-                                  directory_name,
-                                  file_name,
-                                  start_range=initial_request_start,
-                                  end_range=initial_request_end,
-                                  validate_content=validate_content,
-                                  timeout=timeout,
-                                  _context=operation_context,
-                                  snapshot=snapshot)
-
-            # Parse the total file size and adjust the download size if ranges
-            # were specified
-            file_size = _parse_length_from_content_range(file.properties.content_range)
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the file
-                download_size = min(file_size, end_range - start_range + 1)
-            elif start_range is not None:
-                download_size = file_size - start_range
-            else:
-                download_size = file_size
-        except AzureHttpError as ex:
-            if start_range is None and ex.status_code == 416:
-                # Get range will fail on an empty file. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                file = self._get_file(share_name,
-                                      directory_name,
-                                      file_name,
-                                      validate_content=validate_content,
-                                      timeout=timeout,
-                                      _context=operation_context,
-                                      snapshot=snapshot)
-
-                # Set the download size to empty
-                download_size = 0
-            else:
-                raise ex
-
-        # Mark the first progress chunk. If the file is small, this is the only call
-        if progress_callback:
-            progress_callback(file.properties.content_length, download_size)
-
-        # Write the content to the user stream, then clear it from the file
-        # object since the output has been written to the user stream
-        if file.content is not None:
-            stream.write(file.content)
-            file.content = None
-
-        # If the file is small, the download is complete at this point.
-        # If the file is large, download the rest of the file in chunks.
-        if file.properties.content_length != download_size:
-            # At this point we would like to lock on something like the etag so that
-            # if the file is modified, we do not get a corrupted download. However,
-            # this feature is not yet available on the file service.
-
-            end_file = file_size
-            if end_range is not None:
-                # Use the end_range unless it is over the end of the file
-                end_file = min(file_size, end_range + 1)
-
-            _download_file_chunks(
-                self,
-                share_name,
-                directory_name,
-                file_name,
-                download_size,
-                self.MAX_CHUNK_GET_SIZE,
-                first_get_size,
-                initial_request_end + 1,  # start where the first download ended
-                end_file,
-                stream,
-                max_connections,
-                progress_callback,
-                validate_content,
-                timeout,
-                operation_context,
-                snapshot
-            )
-
-            # Set the content length to the download size instead of the size of 
-            # the last range
-            file.properties.content_length = download_size
-
-            # Overwrite the content range to the user requested range
-            file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size)
-
-            # Overwrite the content MD5 as it is the MD5 for the last range instead 
-            # of the stored MD5
-            # TODO: Set to the stored MD5 when the service returns this
-            file.properties.content_md5 = None
-
-        return file
-
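# Editorial sketch with placeholder names: ranged download into a
# caller-owned stream. Ranges are inclusive, so 0-511 is exactly 512 bytes
# (assuming big.bin is at least that large); the returned File carries the
# properties and metadata while the bytes land in the stream.
import io
from azure.storage.file import FileService

svc = FileService(account_name='myaccount', account_key='<key>')
buf = io.BytesIO()
f = svc.get_file_to_stream('myshare', 'mydir', 'big.bin', buf,
                           start_range=0, end_range=511)
assert buf.tell() == 512 and f.properties.content_length == 512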
-    def get_file_to_bytes(self, share_name, directory_name, file_name,
-                          start_range=None, end_range=None, validate_content=False,
-                          progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as an array of bytes, with automatic chunking and
-        progress notifications. Returns an instance of :class:`~azure.storage.file.models.File` with
-        properties, metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-
-        stream = BytesIO()
-        file = self.get_file_to_stream(
-            share_name,
-            directory_name,
-            file_name,
-            stream,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = stream.getvalue()
-        return file
-
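A hedged usage sketch for the method above, assuming FileService is the service class defined in this module; the account name and key are placeholders:

    service = FileService(account_name='myaccount', account_key='<base64-key>')

    # Ranges are inclusive: this fetches the first 512 bytes of the file
    # in the share root (directory_name=None) and reports progress.
    file = service.get_file_to_bytes(
        'myshare', None, 'data.bin',
        start_range=0, end_range=511,
        progress_callback=lambda current, total: print(current, '/', total))
    assert len(file.content) == file.properties.content_length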
-    def get_file_to_text(
-            self, share_name, directory_name, file_name, encoding='utf-8',
-            start_range=None, end_range=None, validate_content=False,
-            progress_callback=None, max_connections=2, timeout=None, snapshot=None):
-        '''
-        Downloads a file as unicode text, with automatic chunking and progress
-        notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties,
-        metadata, and content.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param str encoding:
-            Python encoding to use when decoding the file data.
-        :param int start_range:
-            Start of byte range to use for downloading a section of the file.
-            If no end_range is given, all bytes after the start_range will be downloaded.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for downloading a section of the file.
-            If end_range is given, start_range must be provided.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will download the first 512 bytes of the file.
-        :param bool validate_content:
-            If set to true, validates an MD5 hash for each retrieved portion of 
-            the file. This is primarily valuable for detecting bitflips on the wire 
-            if using http instead of https as https (the default) will already 
-            validate. Note that the service will only return transactional MD5s 
-            for chunks 4MB or less so the first get request will be of size 
-            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If 
-            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be 
-            thrown. As computing the MD5 takes processing time and more requests 
-            will need to be done due to the reduced chunk size there may be some 
-            increase in latency.
-        :param progress_callback:
-            Callback for progress with signature function(current, total) 
-            where current is the number of bytes transferred so far, and total is
-            the size of the file if known.
-        :type progress_callback: func(current, total)
-        :param int max_connections:
-            If set to 2 or greater, an initial get will be done for the first 
-            self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, 
-            the method returns at this point. If it is not, it will download the 
-            remaining data in parallel using a number of threads equal to
-            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
-            If set to 1, a single large get request will be done. This is not 
-            generally recommended but available if very few threads should be 
-            used, network requests are very expensive, or a non-seekable stream 
-            prevents parallel download. This may also be valuable if the file is 
-            being concurrently modified to enforce atomicity or if many files are 
-            expected to be empty as an extra request is required for empty files 
-            if max_connections is greater than 1.
-        :param int timeout:
-            The timeout parameter is expressed in seconds. This method may make 
-            multiple calls to the Azure service and the timeout will apply to 
-            each call individually.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :return: A File with properties, content, and metadata.
-        :rtype: :class:`~azure.storage.file.models.File`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('encoding', encoding)
-
-        file = self.get_file_to_bytes(
-            share_name,
-            directory_name,
-            file_name,
-            start_range,
-            end_range,
-            validate_content,
-            progress_callback,
-            max_connections,
-            timeout,
-            snapshot)
-
-        file.content = file.content.decode(encoding)
-        return file
-
-    def update_range(self, share_name, directory_name, file_name, data,
-                     start_range, end_range, validate_content=False, timeout=None):
-        '''
-        Writes the bytes specified by the request body into the specified range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param bytes data:
-            Content of the range.
-        :param int start_range:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will update the first 512 bytes of the file.
-        :param bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage 
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting 
-            bitflips on the wire if using http instead of https as https (the default) 
-            will already validate. Note that this MD5 hash is not stored with the 
-            file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        _validate_not_none('data', data)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'x-ms-write': 'update',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-        request.body = _get_data_bytes_only('data', data)
-
-        if validate_content:
-            computed_md5 = _get_content_md5(request.body)
-            request.headers['Content-MD5'] = _to_str(computed_md5)
-
-        self._perform_request(request)
-
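A sketch of an in-place range write with the method above, reusing the hypothetical service object from the earlier example. The inclusive range must match the length of data, and clear_range below follows the same inclusive-range convention:

    data = b'\x00' * 512
    service.update_range('myshare', None, 'data.bin', data,
                         start_range=0, end_range=len(data) - 1,
                         validate_content=True)  # sends Content-MD5 for the body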
-    def clear_range(self, share_name, directory_name, file_name, start_range,
-                    end_range, timeout=None):
-        '''
-        Clears the specified range and releases the space used in storage for 
-        that range.
-         
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int end_range:
-            End of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will clear the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'range',
-            'timeout': _int_to_str(timeout),
-        }
-        request.headers = {
-            'Content-Length': '0',
-            'x-ms-write': 'clear',
-        }
-        _validate_and_format_range_headers(
-            request, start_range, end_range)
-
-        self._perform_request(request)
-
-    def list_ranges(self, share_name, directory_name, file_name,
-                    start_range=None, end_range=None, timeout=None, snapshot=None):
-        '''
-        Retrieves the valid ranges for a file.
-
-        :param str share_name:
-            Name of existing share.
-        :param str directory_name:
-            The path to the directory.
-        :param str file_name:
-            Name of existing file.
-        :param int start_range:
-            Specifies the start offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of the file.
-        :param int end_range:
-            Specifies the end offset of bytes over which to list ranges.
-            The start_range and end_range params are inclusive.
-            Ex: start_range=0, end_range=511 will list ranges within the first 512 bytes of the file.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :param str snapshot:
-            A string that represents the snapshot version, if applicable.
-        :returns: a list of valid ranges
-        :rtype: a list of :class:`~azure.storage.file.models.FileRange`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'rangelist',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False)
-
-        return self._perform_request(request, _convert_xml_to_ranges)
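And a sketch of enumerating the valid ranges written so far; each item is a FileRange (defined in models.py, removed later in this diff) with inclusive start/end offsets:

    for r in service.list_ranges('myshare', None, 'data.bin'):
        print('valid bytes: {0}-{1}'.format(r.start, r.end))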
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/models.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/models.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,445 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Share(object):
-    '''
-    File share class.
-    
-    :ivar str name:
-        The name of the share.
-    :ivar ShareProperties properties:
-        System properties for the share.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the share as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list shares operation. If this parameter was specified but the 
-        share has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None, snapshot=None):
-        self.name = name
-        self.properties = props or ShareProperties()
-        self.metadata = metadata
-        self.snapshot = snapshot
-
-
-class ShareProperties(object):
-    '''
-    File share's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        Returns the current share quota in GB.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.quota = None
-
-
-class Directory(object):
-    '''
-    Directory class.
-    
-    :ivar str name:
-        The name of the directory.
-    :ivar DirectoryProperties properties:
-        System properties for the directory.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the directory as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list directory operation. If this parameter was specified but the 
-        directory has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or DirectoryProperties()
-        self.metadata = metadata
-
-
-class DirectoryProperties(object):
-    '''
-    File directory's properties class.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool server_encrypted:
-        Set to true if the directory metadata is encrypted on the server.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.server_encrypted = None
-
-
-class File(object):
-    '''
-    File class.
-    
-    :ivar str name:
-        The name of the file.
-    :ivar content:
-        File content.
-    :vartype content: str or bytes
-    :ivar FileProperties properties:
-        System properties for the file.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the file as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list file operation. If this parameter was specified but the 
-        file has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, content=None, props=None, metadata=None):
-        self.name = name
-        self.content = content
-        self.properties = props or FileProperties()
-        self.metadata = metadata
-
-
-class FileProperties(object):
-    '''
-    File Properties.
-    
-    :ivar datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length:
-        The length of the content returned. If the entire file was requested,
-        the length of the file in bytes. If a subset of the file was requested,
-        the length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client
-        requested a subset of the file.
-    :ivar ~azure.storage.file.models.ContentSettings content_settings:
-        Stores all the content settings for the file.
-    :ivar ~azure.storage.file.models.CopyProperties copy:
-        Stores all the copy properties for the file.
-    :ivar bool server_encrypted:
-        Set to true if the file data and application metadata are completely encrypted.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.content_length = None
-        self.content_range = None
-        self.content_settings = ContentSettings()
-        self.copy = CopyProperties()
-        self.server_encrypted = None
-
-
-class Handle(object):
-    """
-    Represents a file handle.
-
-    :ivar str handle_id:
-        Used to identify handle.
-    :ivar str path:
-        Used to identify the name of the object for which the handle is open.
-    :ivar str file_id:
-        Uniquely identifies the file.
-        This is useful when renames are happening as the file ID does not change.
-    :ivar str parent_id:
-        Uniquely identifies the parent directory.
-        This is useful when renames are happening as the parent ID does not change.
-    :ivar str session_id:
-        Session ID in context of which the file handle was opened.
-    :ivar str client_ip:
-        Used to identify client that has opened the handle.
-        The field is included only if client IP is known by the service.
-    :ivar datetime open_time:
-        Used to decide if the handle may have been leaked.
-    :ivar datetime last_reconnect_time:
-        Used to decide if the handle was reopened after a client/server disconnect due
-        to networking or other faults. The field is included only if a disconnect
-        event occurred and the handle was reopened.
-    """
-
-    def __init__(self, handle_id=None, path=None, file_id=None, parent_id=None, session_id=None,
-                 client_ip=None, open_time=None, last_reconnect_time=None):
-        self.handle_id = handle_id
-        self.path = path
-        self.file_id = file_id
-        self.parent_id = parent_id
-        self.session_id = session_id
-        self.client_ip = client_ip
-        self.open_time = open_time
-        self.last_reconnect_time = last_reconnect_time
-
-
-class ContentSettings(object):
-    '''
-    Used to store the content settings of a file.
-
-    :ivar str content_type:
-        The content type specified for the file. If no content type was
-        specified, the default content type is application/octet-stream. 
-    :ivar str content_encoding:
-        If content_encoding has previously been set
-        for the file, that value is stored.
-    :ivar str content_language:
-        If content_language has previously been set
-        for the file, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :ivar str cache_control:
-        If cache_control has previously been set for
-        the file, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    '''
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None):
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.content_md5 = content_md5
-
-    def _to_headers(self):
-        return {
-            'x-ms-cache-control': _to_str(self.cache_control),
-            'x-ms-content-type': _to_str(self.content_type),
-            'x-ms-content-disposition': _to_str(self.content_disposition),
-            'x-ms-content-md5': _to_str(self.content_md5),
-            'x-ms-content-encoding': _to_str(self.content_encoding),
-            'x-ms-content-language': _to_str(self.content_language),
-        }
-
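A small sketch of how ContentSettings serializes: _to_headers simply maps each setting onto its x-ms-content-* request header, so only the values you set carry meaningful data.

    settings = ContentSettings(content_type='text/plain', cache_control='no-cache')
    headers = settings._to_headers()
    print(headers['x-ms-content-type'])    # text/plain
    print(headers['x-ms-cache-control'])   # no-cache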
-
-class CopyProperties(object):
-    '''
-    File Copy Properties.
-    
-    :ivar str id:
-        String identifier for the last attempted Copy File operation where this file
-        was the destination file. This header does not appear if this file has never
-        been the destination in a Copy File operation, or if this file has been
-        modified after a concluded Copy File operation using Set File Properties or
-        Put File.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source file used in the last attempted
-        Copy File operation where this file was the destination file. This header does not
-        appear if this file has never been the destination in a Copy File operation, or if
-        this file has been modified after a concluded Copy File operation using
-        Set File Properties or Put File.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending: 
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy File.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy File operation where this file was the destination file. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy File operation where this file was the
-        destination file. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure. 
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.source = None
-        self.status = None
-        self.progress = None
-        self.completion_time = None
-        self.status_description = None
-
-
-class FileRange(object):
-    '''
-    File Range.
-    
-    :ivar int start:
-        Byte index for start of file range.
-    :ivar int end:
-        Byte index for end of file range.
-    '''
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-
-class DeleteSnapshot(object):
-    '''
-    Required if the Share has associated snapshots. Specifies how to handle the snapshots.
-    '''
-
-    Include = 'include'
-    '''
-    Delete the share and all of its snapshots.
-    '''
-
-
-class FilePermissions(object):
-    '''
-    FilePermissions class to be used with 
-    :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API.
-
-    :ivar FilePermissions FilePermissions.CREATE:
-        Create a new file or copy a file to a new file.
-    :ivar FilePermissions FilePermissions.DELETE: 
-        Delete the file.
-    :ivar FilePermissions FilePermissions.READ:
-        Read the content, properties, metadata. Use the file as the source of a copy 
-        operation.
-    :ivar FilePermissions FilePermissions.WRITE: 
-        Create or write content, properties, metadata. Resize the file. Use the file 
-        as the destination of a copy operation within the same account.
-    '''
-
-    def __init__(self, read=False, create=False, write=False, delete=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties, metadata. Use the file as the source of a copy 
-            operation.
-        :param bool create:
-            Create a new file or copy a file to a new file.
-        :param bool write: 
-            Create or write content, properties, metadata. Resize the file. Use the file 
-            as the destination of a copy operation within the same account.
-        :param bool delete: 
-            Delete the file.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-FilePermissions.CREATE = FilePermissions(create=True)
-FilePermissions.DELETE = FilePermissions(delete=True)
-FilePermissions.READ = FilePermissions(read=True)
-FilePermissions.WRITE = FilePermissions(write=True)
-
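The class attributes above make permissions composable; a quick sketch of the | and + overloads and the flag-string round trip:

    perm = FilePermissions.READ | FilePermissions.WRITE
    print(str(perm))                            # 'rw' -- always in r, c, w, d order
    print(perm.read, perm.write, perm.delete)   # True True False
    print(str(FilePermissions(_str='rcwd')))    # 'rcwd'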
-
-class SharePermissions(object):
-    '''
-    SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
-    method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. 
-
-    :ivar SharePermissions SharePermissions.DELETE:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use 
-        an account SAS instead.
-    :ivar SharePermissions SharePermissions.LIST:
-        List files and directories in the share.
-    :ivar SharePermissions SharePermissions.READ:
-        Read the content, properties or metadata of any file in the share. Use any 
-        file in the share as the source of a copy operation.
-    :ivar SharePermissions SharePermissions.WRITE:
-        For any file in the share, create or write content, properties or metadata. 
-        Resize the file. Use the file as the destination of a copy operation within 
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or 
-        metadata with a service SAS. Use an account SAS instead.
-    '''
-
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties or metadata of any file in the share. Use any 
-            file in the share as the source of a copy operation.
-        :param bool write: 
-            For any file in the share, create or write content, properties or metadata. 
-            Resize the file. Use the file as the destination of a copy operation within 
-            the same account.
-            Note: You cannot grant permissions to read or write share properties or 
-            metadata with a service SAS. Use an account SAS instead.
-        :param bool delete: 
-            Delete any file in the share.
-            Note: You cannot grant permissions to delete a share with a service SAS. Use 
-            an account SAS instead.
-        :param bool list: 
-            List files and directories in the share.
-        :param str _str: 
-            A string representing the permissions
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-        self.list = list or ('l' in _str)
-
-    def __or__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return SharePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else '') +
-                ('l' if self.list else ''))
-
-
-SharePermissions.DELETE = SharePermissions(delete=True)
-SharePermissions.LIST = SharePermissions(list=True)
-SharePermissions.READ = SharePermissions(read=True)
-SharePermissions.WRITE = SharePermissions(write=True)
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/file/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/file/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,229 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-    _QueryStringConstants,
-    _sign_string,
-)
-from ..common._common_conversion import (
-    _to_str,
-)
-from ._constants import X_MS_VERSION
-
-
-class FileSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating file and share access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_file(self, share_name, directory_name=None, file_name=None,
-                      permission=None, expiry=None, start=None, id=None,
-                      ip=None, protocol=None, cache_control=None,
-                      content_disposition=None, content_encoding=None,
-                      content_language=None, content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param FilePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = share_name
-        if directory_name is not None:
-            resource_path += '/' + _to_str(directory_name)
-        resource_path += '/' + _to_str(file_name)
-
-        sas = _FileSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('f')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, resource_path)
-
-        return sas.get_token()
-
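A hedged sketch of generating a file-level SAS with the factory above; account credentials are placeholders, and the returned token is appended to the file URL as its query string:

    from datetime import datetime, timedelta

    sas = FileSharedAccessSignature('myaccount', '<base64-account-key>')
    token = sas.generate_file(
        'myshare', directory_name='logs', file_name='app.log',
        permission=FilePermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1))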
-    def generate_share(self, share_name, permission=None, expiry=None,
-                       start=None, id=None, ip=None, protocol=None,
-                       cache_control=None, content_disposition=None,
-                       content_encoding=None, content_language=None,
-                       content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param SharePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _FileSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource('s')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, share_name)
-
-        return sas.get_token()
-
-
-class _FileSharedAccessHelper(_SharedAccessHelper):
-    def __init__(self):
-        super(_FileSharedAccessHelper, self).__init__()
-
-    def add_resource_signature(self, account_name, account_key, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/file/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-             get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-             get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
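_sign_string comes from the common package and is not shown in this diff; for orientation, a sketch of the conventional Azure SAS signing step it is assumed to perform (HMAC-SHA256 keyed with the base64-decoded account key, result base64-encoded):

    import base64
    import hashlib
    import hmac

    def sign_string_sketch(account_key, string_to_sign):
        # Assumption: the account key is base64; the signature is base64 text.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')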
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/__init__.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/__init__.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from .models import (
-    Queue,
-    QueueMessage,
-    QueuePermissions,
-    QueueMessageFormat,
-)
-
-from .queueservice import QueueService
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_constants.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_constants.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
-__version__ = '2.0.1'
-
-# x-ms-version for storage service.
-X_MS_VERSION = '2018-03-28'
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_deserialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_deserialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_deserialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_deserialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,150 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from dateutil import parser
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from .models import (
-    Queue,
-    QueueMessage,
-)
-from ..common.models import (
-    _list,
-)
-from ..common._deserialization import (
-    _to_int,
-    _parse_metadata,
-)
-from ._encryption import (
-    _decrypt_queue_message,
-)
-
-
-def _parse_metadata_and_message_count(response):
-    '''
-    Extracts approximate messages count header.
-    '''
-    metadata = _parse_metadata(response)
-    metadata.approximate_message_count = _to_int(response.headers.get('x-ms-approximate-messages-count'))
-
-    return metadata
-
-
-def _parse_queue_message_from_headers(response):
-    '''
-    Extracts pop receipt and time next visible from headers.
-    '''
-    message = QueueMessage()
-    message.pop_receipt = response.headers.get('x-ms-popreceipt')
-    message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible'))
-
-    return message
-
-
-def _convert_xml_to_queues(response):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">
-      <Prefix>string-value</Prefix>
-      <Marker>string-value</Marker>
-      <MaxResults>int-value</MaxResults>
-      <Queues>
-        <Queue>
-          <Name>string-value</Name>
-          <Metadata>
-            <metadata-name>value</metadata-name>
-          </Metadata>
-        </Queue>
-      </Queues>
-      <NextMarker />
-    </EnumerationResults>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    queues = _list()
-    list_element = ETree.fromstring(response.body)
-
-    # Set next marker
-    next_marker = list_element.findtext('NextMarker') or None
-    setattr(queues, 'next_marker', next_marker)
-
-    queues_element = list_element.find('Queues')
-
-    for queue_element in queues_element.findall('Queue'):
-        # Name element
-        queue = Queue()
-        queue.name = queue_element.findtext('Name')
-
-        # Metadata
-        metadata_root_element = queue_element.find('Metadata')
-        if metadata_root_element is not None:
-            queue.metadata = dict()
-            for metadata_element in metadata_root_element:
-                queue.metadata[metadata_element.tag] = metadata_element.text
-
-        # Add queue to list
-        queues.append(queue)
-
-    return queues
-
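A self-contained sketch of how the sample payload in the docstring maps onto the ElementTree lookups used above (standard library only; the XML literal is abridged):

    from xml.etree import ElementTree as ETree

    sample = (
        '<EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/">'
        '<Queues><Queue><Name>logs</Name>'
        '<Metadata><owner>ops</owner></Metadata></Queue></Queues>'
        '<NextMarker /></EnumerationResults>')

    root = ETree.fromstring(sample)
    for queue_element in root.find('Queues').findall('Queue'):
        name = queue_element.findtext('Name')                          # 'logs'
        metadata = {m.tag: m.text for m in queue_element.find('Metadata')}
        print(name, metadata)                                          # logs {'owner': 'ops'}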
-
-def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver,
-                                   content=None):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessagesList>
-        <QueueMessage>
-          <MessageId>string-message-id</MessageId>
-          <InsertionTime>insertion-time</InsertionTime>
-          <ExpirationTime>expiration-time</ExpirationTime>
-          <PopReceipt>opaque-string-receipt-data</PopReceipt>
-          <TimeNextVisible>time-next-visible</TimeNextVisible>
-          <DequeueCount>integer</DequeueCount>
-          <MessageText>message-body</MessageText>
-        </QueueMessage>
-    </QueueMessagesList>
-    '''
-    if response is None or response.body is None:
-        return None
-
-    messages = list()
-    list_element = ETree.fromstring(response.body)
-
-    for message_element in list_element.findall('QueueMessage'):
-        message = QueueMessage()
-
-        message.id = message_element.findtext('MessageId')
-
-        dequeue_count = message_element.findtext('DequeueCount')
-        if dequeue_count is not None:
-            message.dequeue_count = _to_int(dequeue_count)
-
-        # content is not returned for put_message
-        if content is not None:
-            message.content = content
-        else:
-            message.content = message_element.findtext('MessageText')
-            if (key_encryption_key is not None) or (resolver is not None):
-                message.content = _decrypt_queue_message(message.content, require_encryption,
-                                                         key_encryption_key, resolver)
-            message.content = decode_function(message.content)
-
-        message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
-        message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
-
-        message.pop_receipt = message_element.findtext('PopReceipt')
-
-        time_next_visible = message_element.find('TimeNextVisible')
-        if time_next_visible is not None:
-            message.time_next_visible = parser.parse(time_next_visible.text)
-
-        # Add message to list
-        messages.append(message)
-
-    return messages
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_encryption.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_encryption.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from json import (
-    dumps,
-    loads,
-)
-
-from azure.common import (
-    AzureException,
-)
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from ..common._common_conversion import (
-    _encode_base64,
-    _decode_base64_to_bytes
-)
-from ..common._encryption import (
-    _generate_encryption_data_dict,
-    _dict_to_encryption_data,
-    _generate_AES_CBC_cipher,
-    _validate_and_unwrap_cek,
-    _EncryptionAlgorithm,
-)
-from ..common._error import (
-    _ERROR_DECRYPTION_FAILURE,
-    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
-    _validate_not_none,
-    _validate_key_encryption_key_wrap,
-)
-from ._error import (
-    _ERROR_MESSAGE_NOT_ENCRYPTED
-)
-
-
-def _encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). 
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should 
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
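The key-encryption-key is duck-typed against the interface documented above. A toy sketch (illustrative only, not secure: it "wraps" the CEK by returning it unchanged) showing the expected round trip with the decrypt helper below:

    class ToyKEK(object):
        # Insecure placeholder: a real KEK must actually encrypt the CEK.
        def wrap_key(self, key):
            return key
        def unwrap_key(self, key, algorithm):
            return key
        def get_key_wrap_algorithm(self):
            return 'identity'   # hypothetical algorithm label
        def get_kid(self):
            return 'toy-kek-1'

    envelope = _encrypt_queue_message(u'hello', ToyKEK())            # JSON string
    print(_decrypt_queue_message(envelope, True, ToyKEK(), None))    # hello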
-
-def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        parsed = loads(message)
-
-        encryption_data = _dict_to_encryption_data(parsed['EncryptionData'])
-        decoded_data = _decode_base64_to_bytes(parsed['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Not an encrypted-message envelope: either the message is not JSON,
-        # or it is user-supplied JSON without encryption metadata.
-        if require_encryption:
-            raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED)
-
-        return message
-    try:
-        return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception:
-        raise AzureException(_ERROR_DECRYPTION_FAILURE)
-
-
-def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with PKCS7 padding (128-bit blocks).
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
-
-    :param bytes message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :return: The decrypted plaintext, as bytes (callers decode as needed).
-    :rtype: bytes
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # Decrypt the ciphertext.
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(message) +
-                      decryptor.finalize())
-
-    # Strip the PKCS7 padding.
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = unpadder.update(decrypted_data) + unpadder.finalize()
-
-    return decrypted_data
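
The two helpers above implement client-side envelope encryption: a fresh 32-byte
content-encryption-key encrypts the message with AES-256-CBC and PKCS7 padding,
and only the wrapped form of that key travels with the message inside the
EncryptionData metadata. A minimal sketch of the key-encryption-key object the
helpers expect -- the class name, the XOR "wrap", and the kid value are all
illustrative stand-ins, not part of any Azure API:

    class IllustrativeKek(object):
        """Demo key-encryption-key matching the interface described in the
        docstrings above: wrap_key/get_key_wrap_algorithm/get_kid for
        encryption, unwrap_key/get_kid for decryption. The XOR "wrap" stands
        in for a real algorithm (e.g. RSA-OAEP) and is not safe to use
        outside a demo."""

        _PAD = b'\x5a' * 32           # fixed pad, demo only
        kid = 'local:demo-kek-1'      # key id recorded in EncryptionData

        def wrap_key(self, key):
            # "Wrap" the 32-byte content-encryption-key.
            return bytes(b ^ p for b, p in zip(key, self._PAD))

        def unwrap_key(self, key, algorithm):
            # XOR with the same fixed pad is its own inverse.
            assert algorithm == 'XOR-DEMO'
            return bytes(b ^ p for b, p in zip(key, self._PAD))

        def get_key_wrap_algorithm(self):
            return 'XOR-DEMO'

        def get_kid(self):
            return self.kid

    # envelope = _encrypt_queue_message(u'secret', IllustrativeKek())
    # plain = _decrypt_queue_message(envelope, True, IllustrativeKek(), None)
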
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_error.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_error.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_error.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_error.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-from ..common._error import (
-    _validate_type_bytes,
-)
-
-_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
-_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
-_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'
-_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.'
-
-def _validate_message_type_text(param):
-    if sys.version_info < (3,):
-        if not isinstance(param, unicode):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)
-    else:
-        if not isinstance(param, str):
-            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
-
-
-def _validate_message_type_bytes(param):
-    _validate_type_bytes('message', param)
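
The removed _validate_message_type_text gates on the interpreter version at
every call; the same check can be collapsed by resolving the text type once.
A rough, Python 3-safe equivalent (the _TEXT_TYPE name is ours, not the
module's):

    import sys

    # `unicode` only exists on Python 2; the else-branch is never evaluated
    # on Python 3, hence the lint suppression.
    _TEXT_TYPE = str if sys.version_info >= (3,) else unicode  # noqa: F821

    def _validate_message_type_text(param):
        if not isinstance(param, _TEXT_TYPE):
            raise TypeError(
                'message should be of type {0}.'.format(_TEXT_TYPE.__name__))
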
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_serialization.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_serialization.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/_serialization.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/_serialization.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,73 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import sys
-
-if sys.version_info >= (3,):
-    from io import BytesIO
-else:
-    try:
-        from cStringIO import StringIO as BytesIO
-        except ImportError:
-        from StringIO import StringIO as BytesIO
-
-try:
-    from xml.etree import cElementTree as ETree
-except ImportError:
-    from xml.etree import ElementTree as ETree
-
-from ..common._common_conversion import (
-    _str,
-)
-from ._encryption import (
-    _encrypt_queue_message,
-)
-
-
-def _get_path(queue_name=None, include_messages=None, message_id=None):
-    '''
-    Creates the path to access a queue resource.
-
-    queue_name:
-        Name of queue.
-    include_messages:
-        Whether or not to include messages.
-    message_id:
-        Message id.
-    '''
-    if queue_name and include_messages and message_id:
-        return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
-    elif queue_name and include_messages:
-        return '/{0}/messages'.format(_str(queue_name))
-    elif queue_name:
-        return '/{0}'.format(_str(queue_name))
-    else:
-        return '/'
-
-
-def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
-    '''
-    <?xml version="1.0" encoding="utf-8"?>
-    <QueueMessage>
-        <MessageText></MessageText>
-    </QueueMessage>
-    '''
-    queue_message_element = ETree.Element('QueueMessage')
-
-    # Encode the message text, then encrypt it if a key-encryption-key was set.
-    message_text = encode_function(message_text)
-    if key_encryption_key is not None:
-        message_text = _encrypt_queue_message(message_text, key_encryption_key)
-    ETree.SubElement(queue_message_element, 'MessageText').text = message_text
-
-    # Add xml declaration and serialize
-    try:
-        stream = BytesIO()
-        ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
-        output = stream.getvalue()
-    finally:
-        stream.close()
-
-    return output
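
Without a key-encryption-key, _convert_queue_message_xml reduces to building
the two-element document shown in its docstring. A standalone reproduction
using only the standard library (identity encode_function assumed):

    from io import BytesIO
    from xml.etree import ElementTree as ETree

    # Build <QueueMessage><MessageText>...</MessageText></QueueMessage>,
    # mirroring the removed helper with no encryption step.
    root = ETree.Element('QueueMessage')
    ETree.SubElement(root, 'MessageText').text = 'hello queue'

    stream = BytesIO()
    try:
        ETree.ElementTree(root).write(stream, xml_declaration=True,
                                      encoding='utf-8', method='xml')
        output = stream.getvalue()
    finally:
        stream.close()

    print(output)
    # b"<?xml version='1.0' encoding='utf-8'?>\n<QueueMessage><MessageText>hello queue</MessageText></QueueMessage>"
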
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/models.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/models.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,239 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from base64 import (
-    b64encode,
-    b64decode,
-)
-from xml.sax.saxutils import escape as xml_escape
-from xml.sax.saxutils import unescape as xml_unescape
-
-from ._error import (
-    _validate_message_type_bytes,
-    _validate_message_type_text,
-    _ERROR_MESSAGE_NOT_BASE64,
-)
-
-
-class Queue(object):
-    '''
-    Queue class.
-     
-    :ivar str name: 
-        The name of the queue.
-    :ivar metadata: 
-        A dict containing name-value pairs associated with the queue as metadata.
-        This var is set to None unless the include=metadata param was included 
-        for the list queues operation. If this parameter was specified but the 
-        queue has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self):
-        self.name = None
-        self.metadata = None
-
-
-class QueueMessage(object):
-    ''' 
-    Queue message class. 
-
-    :ivar str id: 
-        A GUID value assigned to the message by the Queue service that 
-        identifies the message in the queue. This value may be used together 
-        with the value of pop_receipt to delete a message from the queue after 
-        it has been retrieved with the get messages operation. 
-    :ivar date insertion_time: 
-        A UTC date value representing the time the message was inserted.
-    :ivar date expiration_time: 
-        A UTC date value representing the time the message expires.
-    :ivar int dequeue_count: 
-        Begins with a value of 1 the first time the message is dequeued. This 
-        value is incremented each time the message is subsequently dequeued.
-    :ivar obj content: 
-        The message content. Type is determined by the decode_function set on 
-        the service. Default is str.
-    :ivar str pop_receipt: 
-        A receipt str which can be used together with the message_id element to 
-        delete a message from the queue after it has been retrieved with the get 
-        messages operation. Only returned by get messages operations. Set to 
-        None for peek messages.
-    :ivar date time_next_visible: 
-        A UTC date value representing the time the message will next be visible. 
-        Only returned by get messages operations. Set to None for peek messages.
-    '''
-
-    def __init__(self):
-        self.id = None
-        self.insertion_time = None
-        self.expiration_time = None
-        self.dequeue_count = None
-        self.content = None
-        self.pop_receipt = None
-        self.time_next_visible = None
-
-
-class QueueMessageFormat:
-    ''' 
-    Encoding and decoding methods which can be used to modify how the queue service 
-    encodes and decodes queue messages. Set these to queueservice.encode_function 
-    and queueservice.decode_function to modify the behavior. The defaults are 
-    text_xmlencode and text_xmldecode, respectively.
-    '''
-
-    @staticmethod
-    def text_base64encode(data):
-        '''
-        Base64 encode unicode text.
-        
-        :param str data: String to encode.
-        :return: Base64 encoded string.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return b64encode(data.encode('utf-8')).decode('utf-8')
-
-    @staticmethod
-    def text_base64decode(data):
-        '''
-        Base64 decode to unicode text.
-        
-        :param str data: String data to decode to unicode.
-        :return: Base64 decoded string.
-        :rtype: str
-        '''
-        try:
-            return b64decode(data.encode('utf-8')).decode('utf-8')
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def binary_base64encode(data):
-        '''
-        Base64 encode byte strings.
-        
-        :param str data: Binary string to encode.
-        :return: Base64 encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_bytes(data)
-        return b64encode(data).decode('utf-8')
-
-    @staticmethod
-    def binary_base64decode(data):
-        '''
-        Base64 decode to byte string.
-        
-        :param str data: Data to decode to a byte string.
-        :return: Base64 decoded data.
-        :rtype: str
-        '''
-        try:
-            return b64decode(data.encode('utf-8'))
-        except (ValueError, TypeError):
-            # ValueError for Python 3, TypeError for Python 2
-            raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
-
-    @staticmethod
-    def text_xmlencode(data):
-        ''' 
-        XML encode unicode text.
-
-        :param str data: Unicode string to encode
-        :return: XML encoded data.
-        :rtype: str
-        '''
-        _validate_message_type_text(data)
-        return xml_escape(data)
-
-    @staticmethod
-    def text_xmldecode(data):
-        ''' 
-        XML decode to unicode text.
-
-        :param str data: Data to decode to unicode.
-        :return: XML decoded data.
-        :rtype: str
-        '''
-        return xml_unescape(data)
-
-    @staticmethod
-    def noencode(data):
-        ''' 
-        Do no encoding. 
-
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str
-        '''
-        return data
-
-    @staticmethod
-    def nodecode(data):
-        '''
-        Do no decoding.
-        
-        :param str data: Data.
-        :return: The data passed in is returned unmodified.
-        :rtype: str        
-        '''
-        return data
-
-
-class QueuePermissions(object):
-    '''
-    QueuePermissions class to be used with the :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature`
-    method and with the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. 
-
-    :ivar QueuePermissions QueuePermissions.READ: 
-        Read metadata and properties, including message count. Peek at messages. 
-    :ivar QueuePermissions QueuePermissions.ADD: 
-        Add messages to the queue.
-    :ivar QueuePermissions QueuePermissions.UPDATE:
-        Update messages in the queue. Note: Use the Process permission with 
-        Update so you can first get the message you want to update.
-    :ivar QueuePermissions QueuePermissions.PROCESS:
-        Get and delete messages from the queue. 
-    '''
-
-    def __init__(self, read=False, add=False, update=False, process=False, _str=None):
-        '''
-        :param bool read:
-            Read metadata and properties, including message count. Peek at messages.
-        :param bool add:
-            Add messages to the queue.
-        :param bool update:
-            Update messages in the queue. Note: Use the Process permission with 
-            Update so you can first get the message you want to update.
-        :param bool process: 
-            Get and delete messages from the queue.
-        :param str _str: 
-            A string representing the permissions.
-        '''
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.add = add or ('a' in _str)
-        self.update = update or ('u' in _str)
-        self.process = process or ('p' in _str)
-
-    def __or__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return QueuePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('a' if self.add else '') +
-                ('u' if self.update else '') +
-                ('p' if self.process else ''))
-
-
-QueuePermissions.READ = QueuePermissions(read=True)
-QueuePermissions.ADD = QueuePermissions(add=True)
-QueuePermissions.UPDATE = QueuePermissions(update=True)
-QueuePermissions.PROCESS = QueuePermissions(process=True)
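
Against the 1.4.0 package, where this module is still importable, the
encode/decode pairs and the permission flags can be exercised directly;
a short sanity check (import path taken from the diff header above):

    from azure.multiapi.storage.v2018_11_09.queue.models import (
        QueueMessageFormat,
        QueuePermissions,
    )

    # text_base64encode/text_base64decode round-trip unicode text.
    encoded = QueueMessageFormat.text_base64encode(u'hello')
    assert encoded == 'aGVsbG8='
    assert QueueMessageFormat.text_base64decode(encoded) == u'hello'

    # Permission flags compose with | (or +) into their string form.
    perms = QueuePermissions.READ | QueuePermissions.PROCESS
    assert str(perms) == 'rp'
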
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/queueservice.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/queueservice.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/queueservice.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/queueservice.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1009 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.common import (
-    AzureConflictHttpError,
-    AzureHttpError,
-)
-
-from ..common._auth import (
-    _StorageSASAuthentication,
-    _StorageSharedKeyAuthentication,
-)
-from ..common._common_conversion import (
-    _int_to_str,
-    _to_str,
-)
-from ..common._connection import _ServiceParameters
-from ..common._constants import (
-    SERVICE_HOST_BASE,
-    DEFAULT_PROTOCOL,
-)
-from ..common._deserialization import (
-    _convert_xml_to_service_properties,
-    _convert_xml_to_signed_identifiers,
-    _convert_xml_to_service_stats,
-)
-from ..common._error import (
-    _dont_fail_not_exist,
-    _dont_fail_on_exist,
-    _validate_not_none,
-    _ERROR_CONFLICT,
-    _ERROR_STORAGE_MISSING_INFO,
-    _validate_access_policies,
-    _validate_encryption_required,
-    _validate_decryption_required,
-)
-from ..common._http import (
-    HTTPRequest,
-)
-from ..common._serialization import (
-    _convert_signed_identifiers_to_xml,
-    _convert_service_properties_to_xml,
-)
-from ..common._serialization import (
-    _get_request_body,
-    _add_metadata_headers,
-)
-from ..common.models import (
-    Services,
-    ListGenerator,
-    _OperationContext,
-)
-from .sharedaccesssignature import (
-    QueueSharedAccessSignature,
-)
-from ..common.storageclient import StorageClient
-from ._deserialization import (
-    _convert_xml_to_queues,
-    _convert_xml_to_queue_messages,
-    _parse_queue_message_from_headers,
-    _parse_metadata_and_message_count,
-)
-from ._serialization import (
-    _convert_queue_message_xml,
-    _get_path,
-)
-from .models import (
-    QueueMessageFormat,
-)
-from ._constants import (
-    X_MS_VERSION,
-    __version__ as package_version,
-)
-
-_QUEUE_ALREADY_EXISTS_ERROR_CODE = 'QueueAlreadyExists'
-_QUEUE_NOT_FOUND_ERROR_CODE = 'QueueNotFound'
-_HTTP_RESPONSE_NO_CONTENT = 204
-
-
-class QueueService(StorageClient):
-    '''
-    This is the main class managing queue resources.
-
-    The Queue service stores messages. A queue can contain an unlimited number of 
-    messages, each of which can be up to 64KB in size. Messages are generally added 
-    to the end of the queue and retrieved from the front of the queue, although 
-    first in, first out (FIFO) behavior is not guaranteed.
-
-    :ivar function(data) encode_function: 
-        A function used to encode queue messages. Takes as 
-        a parameter the data passed to the put_message API and returns the encoded 
-        message. Defaults to xml-encoding text, but bytes and other 
-        encodings can be used. For example, base64 may be preferable for developing 
-        across multiple Azure Storage libraries in different languages. See the 
-        :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and 
-        no encoding methods as well as binary equivalents.
-    :ivar function(data) decode_function: 
-        A function used to decode queue messages. Takes as 
-        a parameter the data returned by the get_messages and peek_messages APIs and 
-        returns the decoded message. Defaults to xml-decoding to text, but 
-        bytes and other decodings can be used. For example, base64 may be preferable 
-        for developing across multiple Azure Storage libraries in different languages. 
-        See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 
-        and no decoding methods as well as binary equivalents.
-    :ivar object key_encryption_key:
-        The key-encryption-key optionally provided by the user. If provided, will be used to
-        encrypt/decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
-        If both are provided, the resolver will take precedence.
-        Must implement the following methods for APIs requiring encryption:
-        wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-        Must implement the following methods for APIs requiring decryption:
-        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :ivar function key_resolver_function(kid):
-        A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
-        For methods requiring decryption, either the key_encryption_key OR
-        the resolver must be provided. If both are provided, the resolver will take precedence.
-        It uses the kid string to return a key-encryption-key implementing the interface defined above.
-    :ivar bool require_encryption:
-        A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and
-        successfully read from the queue are/were encrypted while on the server. If this flag is set, all required 
-        parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
-    '''
-
-    def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
-                 protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, request_session=None,
-                 connection_string=None, socket_timeout=None, token_credential=None):
-        '''
-        :param str account_name:
-            The storage account name. This is used to authenticate requests 
-            signed with an account key and to construct the storage endpoint. It 
-            is required unless a connection string is given.
-        :param str account_key:
-            The storage account key. This is used for shared key authentication. 
-        :param str sas_token:
-             A shared access signature token to use to authenticate requests 
-             instead of the account key. If account key and sas token are both 
-             specified, account key will be used to sign.
-        :param bool is_emulated:
-            Whether to use the emulator. Defaults to False. If specified, will 
-            override all other parameters besides connection string and request 
-            session.
-        :param str protocol:
-            The protocol to use for requests. Defaults to https.
-        :param str endpoint_suffix:
-            The host base component of the url, minus the account name. Defaults 
-            to Azure (core.windows.net). Override this to use the China cloud 
-            (core.chinacloudapi.cn).
-        :param requests.Session request_session:
-            The session object to use for http requests.
-        :param str connection_string:
-            If specified, this will override all other parameters besides 
-            request session. See
-            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
-            for the connection string format.
-        :param int socket_timeout:
-            If specified, this will override the default socket timeout. The timeout specified is in seconds.
-            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
-        :param token_credential:
-            A token credential used to authenticate HTTPS requests. The token value
-            should be updated before its expiration.
-        :type token_credential: ~azure.storage.common.TokenCredential
-        '''
-        service_params = _ServiceParameters.get_service_parameters(
-            'queue',
-            account_name=account_name,
-            account_key=account_key,
-            sas_token=sas_token,
-            token_credential=token_credential,
-            is_emulated=is_emulated,
-            protocol=protocol,
-            endpoint_suffix=endpoint_suffix,
-            request_session=request_session,
-            connection_string=connection_string,
-            socket_timeout=socket_timeout)
-
-        super(QueueService, self).__init__(service_params)
-
-        if self.account_key:
-            self.authentication = _StorageSharedKeyAuthentication(
-                self.account_name,
-                self.account_key,
-                self.is_emulated
-            )
-        elif self.sas_token:
-            self.authentication = _StorageSASAuthentication(self.sas_token)
-        elif self.token_credential:
-            self.authentication = self.token_credential
-        else:
-            raise ValueError(_ERROR_STORAGE_MISSING_INFO)
-
-        self.encode_function = QueueMessageFormat.text_xmlencode
-        self.decode_function = QueueMessageFormat.text_xmldecode
-        self.key_encryption_key = None
-        self.key_resolver_function = None
-        self.require_encryption = False
-        self._X_MS_VERSION = X_MS_VERSION
-        self._update_user_agent_string(package_version)
-
-    def generate_account_shared_access_signature(self, resource_types, permission,
-                                                 expiry, start=None, ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue service.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account SAS.
-        :param AccountPermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required. Note that account SAS tokens cannot reference a stored 
-            access policy, so the permissions must always be specified on 
-            the token itself.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required. Unlike a service SAS, an account SAS cannot defer this 
-            field to a stored access policy, so it must be supplied on the 
-            token itself. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_account(Services.QUEUE, resource_types, permission,
-                                    expiry, start=start, ip=ip, protocol=protocol)
-
-    def generate_queue_shared_access_signature(self, queue_name,
-                                               permission=None,
-                                               expiry=None,
-                                               start=None,
-                                               id=None,
-                                               ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            The name of the queue to create a SAS token for.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The 
-            user is restricted to operations allowed by the permissions. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has been 
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid. 
-            Required unless an id is given referencing a stored access policy 
-            which contains this field. This field must be omitted if it has 
-            been specified in an associated stored access policy. Azure will always 
-            convert values to UTC. If a date is passed in without timezone info, it 
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If 
-            omitted, start time for this call is assumed to be the time when the 
-            storage service receives the request. Azure will always convert values 
-            to UTC. If a date is passed in without timezone info, it is assumed to 
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a 
-            stored access policy. To create a stored access policy, use :func:`~set_queue_acl`.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :return: A Shared Access Signature (sas) token.
-        :rtype: str
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('self.account_name', self.account_name)
-        _validate_not_none('self.account_key', self.account_key)
-
-        sas = QueueSharedAccessSignature(self.account_name, self.account_key)
-        return sas.generate_queue(
-            queue_name,
-            permission=permission,
-            expiry=expiry,
-            start=start,
-            id=id,
-            ip=ip,
-            protocol=protocol,
-        )
-
-    def get_queue_service_stats(self, timeout=None):
-        '''
-        Retrieves statistics related to replication for the Queue service. It is 
-        only available when read-access geo-redundant replication is enabled for 
-        the storage account.
-
-        With geo-redundant replication, Azure Storage keeps your data durable 
-        in two locations. In both locations, Azure Storage constantly maintains 
-        multiple healthy replicas of your data. The location where you read, 
-        create, update, or delete data is the primary storage account location. 
-        The primary location exists in the region you choose at the time you 
-        create an account via the Azure portal, for 
-        example, North Central US. The location to which your data is replicated 
-        is the secondary location. The secondary location is automatically 
-        determined based on the location of the primary; it is in a second data 
-        center that resides in the same region as the primary location. Read-only 
-        access is available from the secondary location, if read-access geo-redundant 
-        replication is enabled for your storage account.
-
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The queue service stats.
-        :rtype: :class:`~azure.storage.common.models.ServiceStats`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(primary=False, secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'stats',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_stats)
-
-    def get_queue_service_properties(self, timeout=None):
-        '''
-        Gets the properties of a storage account's Queue service, including
-        logging, analytics and CORS rules.
-
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: The queue service properties.
-        :rtype: :class:`~azure.storage.common.models.ServiceProperties`
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_service_properties)
-
-    def set_queue_service_properties(self, logging=None, hour_metrics=None,
-                                     minute_metrics=None, cors=None, timeout=None):
-        '''
-        Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the 
-        existing settings on the service for that functionality are preserved. 
-        For more information on Azure Storage Analytics, see 
-        https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
-
-        :param Logging logging:
-            The logging settings provide request logs.
-        :param Metrics hour_metrics:
-            The hour metrics settings provide a summary of request 
-            statistics grouped by API in hourly aggregates for queues.
-        :param Metrics minute_metrics:
-            The minute metrics settings provide request statistics 
-            for each minute for queues.
-        :param cors:
-            You can include up to five CorsRule elements in the 
-            list. If an empty list is specified, all CORS rules will be deleted, 
-            and CORS will be disabled for the service. For detailed information 
-            about CORS rules and evaluation logic, see 
-            https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
-        :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path()
-        request.query = {
-            'restype': 'service',
-            'comp': 'properties',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
-        self._perform_request(request)
-
-    def list_queues(self, prefix=None, num_results=None, include_metadata=False,
-                    marker=None, timeout=None):
-        '''
-        Returns a generator to list the queues. The generator will lazily follow 
-        the continuation tokens returned by the service and stop when all queues 
-        have been returned or num_results is reached.
-
-        If num_results is specified and the account has more than that number of 
-        queues, the generator will have a populated next_marker field once it 
-        finishes. This marker can be used to create a new generator if more 
-        results are desired.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param int num_results:
-            The maximum number of queues to return.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the 
-            next_marker field of a previous generator object if num_results was 
-            specified and that generator has finished enumerating results. If 
-            specified, this generator will begin returning results from the point 
-            where the previous generator stopped.
-        :param int timeout:
-            The server timeout, expressed in seconds. This function may make multiple 
-            calls to the service in which case the timeout value specified will be 
-            applied to each individual call.
-        '''
-        include = 'metadata' if include_metadata else None
-        operation_context = _OperationContext(location_lock=True)
-        kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include,
-                  'marker': marker, 'timeout': timeout, '_context': operation_context}
-        resp = self._list_queues(**kwargs)
-
-        return ListGenerator(resp, self._list_queues, (), kwargs)
-
-    def _list_queues(self, prefix=None, marker=None, max_results=None,
-                     include=None, timeout=None, _context=None):
-        '''
-        Returns a list of queues under the specified account. Makes a single list 
-        request to the service. Used internally by the list_queues method.
-
-        :param str prefix:
-            Filters the results to return only queues with names that begin
-            with the specified prefix.
-        :param str marker:
-            A token which identifies the portion of the query to be
-            returned with the next query operation. The operation returns a
-            next_marker element within the response body if the list returned
-            was not complete. This value may then be used as a query parameter
-            in a subsequent call to request the next portion of the list of
-            queues. The marker value is opaque to the client.
-        :param int max_results:
-            The maximum number of queues to return. A single list request may 
-            return up to 1000 queues and potentially a continuation token which 
-            should be followed to get additional results.
-        :param str include:
-            Include this parameter to specify that the queue's
-            metadata be returned as part of the response body.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path()
-        request.query = {
-            'comp': 'list',
-            'prefix': _to_str(prefix),
-            'marker': _to_str(marker),
-            'maxresults': _int_to_str(max_results),
-            'include': _to_str(include),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queues, operation_context=_context)
-
-    def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
-        '''
-        Creates a queue under the given account.
-
-        :param str queue_name:
-            The name of the queue to create. A queue name must be from 3 through 
-            63 characters long and may only contain lowercase letters, numbers, 
-            and the dash (-) character. The first and last letters in the queue 
-            must be alphanumeric. The dash (-) character cannot be the first or 
-            last character. Consecutive dash characters are not permitted in the 
-            queue name.
-        :param metadata:
-            A dict containing name-value pairs to associate with the queue as 
-            metadata. Note that metadata names preserve the case with which they 
-            were created, but are case-insensitive when set or read. 
-        :type metadata: dict(str, str)
-        :param bool fail_on_exist:
-            Specifies whether to throw an exception if the queue already exists.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was created. If fail_on_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        _add_metadata_headers(metadata, request)
-
-        def _return_request(request):
-            return request
-
-        if not fail_on_exist:
-            try:
-                response = self._perform_request(request, parser=_return_request,
-                                                 expected_errors=[_QUEUE_ALREADY_EXISTS_ERROR_CODE])
-                if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                    return False
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_on_exist(ex)
-                return False
-        else:
-            response = self._perform_request(request, parser=_return_request)
-            if response.status == _HTTP_RESPONSE_NO_CONTENT:
-                raise AzureConflictHttpError(
-                    _ERROR_CONFLICT.format(response.message), response.status)
-            return True
-
-    def delete_queue(self, queue_name, fail_not_exist=False, timeout=None):
-        '''
-        Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion 
-        and is no longer accessible to clients. The queue is later removed from 
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete. 
-        If an operation is attempted against the queue while it is being deleted, 
-        an :class:`AzureConflictHttpError` will be thrown.
-
-        :param str queue_name:
-            The name of the queue to delete.
-        :param bool fail_not_exist:
-            Specifies whether to throw an exception if the queue doesn't exist.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A boolean indicating whether the queue was deleted. If fail_not_exist 
-            was set to True, this will throw instead of returning false.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {'timeout': _int_to_str(timeout)}
-        if not fail_not_exist:
-            try:
-                self._perform_request(request, expected_errors=[_QUEUE_NOT_FOUND_ERROR_CODE])
-                return True
-            except AzureHttpError as ex:
-                _dont_fail_not_exist(ex)
-                return False
-        else:
-            self._perform_request(request)
-            return True
-
-    def get_queue_metadata(self, queue_name, timeout=None):
-        '''
-        Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A dictionary representing the queue metadata with an 
-            approximate_message_count int property on the dict estimating the 
-            number of messages in the queue.
-        :rtype: dict(str, str)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _parse_metadata_and_message_count)
-
-    def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
-        '''
-        Sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param dict metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'metadata',
-            'timeout': _int_to_str(timeout),
-        }
-        _add_metadata_headers(metadata, request)
-
-        self._perform_request(request)
-
-    def exists(self, queue_name, timeout=None):
-        '''
-        Returns a boolean indicating whether the queue exists.
-
-        :param str queue_name:
-            The name of queue to check for existence.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A boolean indicating whether the queue exists.
-        :rtype: bool
-        '''
-        _validate_not_none('queue_name', queue_name)
-
-        try:
-            request = HTTPRequest()
-            request.method = 'GET'
-            request.host_locations = self._get_host_locations(secondary=True)
-            request.path = _get_path(queue_name)
-            request.query = {
-                'comp': 'metadata',
-                'timeout': _int_to_str(timeout),
-            }
-
-            self._perform_request(request, expected_errors=[_QUEUE_NOT_FOUND_ERROR_CODE])
-            return True
-        except AzureHttpError as ex:
-            _dont_fail_not_exist(ex)
-            return False
-
-    def get_queue_acl(self, queue_name, timeout=None):
-        '''
-        Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-
-        return self._perform_request(request, _convert_xml_to_signed_identifiers)
-
-    def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None):
-        '''
-        Sets stored access policies for the queue that may be used with Shared 
-        Access Signatures. 
-        
-        When you set permissions for a queue, the existing permissions are replaced. 
-        To update the queue's permissions, call :func:`~get_queue_acl` to fetch 
-        all access policies associated with the queue, modify the access policy 
-        that you wish to change, and then call this function with the complete 
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to 
-        30 seconds to take effect. During this interval, a shared access signature 
-        that is associated with the stored access policy will throw an 
-        :class:`AzureHttpError` until the access policy becomes active.
-
-        :param str queue_name:
-            The name of an existing queue.
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the queue. The 
-            dictionary may contain up to 5 elements. An empty dictionary 
-            will clear the access policies set on the service. 
-        :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_access_policies(signed_identifiers)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name)
-        request.query = {
-            'comp': 'acl',
-            'timeout': _int_to_str(timeout),
-        }
-        request.body = _get_request_body(
-            _convert_signed_identifiers_to_xml(signed_identifiers))
-        self._perform_request(request)
-
-    def put_message(self, queue_name, content, visibility_timeout=None,
-                    time_to_live=None, timeout=None):
-        '''
-        Adds a new message to the back of the message queue. 
-
-        The visibility timeout specifies the time that the message will be 
-        invisible. After the timeout expires, the message will become visible. 
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the 
-        queue. The message will be deleted from the queue when the time-to-live 
-        period expires.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue to put the message into.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str. The encoded message can be up to 
-            64KB in size.
-        :param int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :param int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The time-to-live may be any positive number or -1 for infinity. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.models.QueueMessage` object.
-            This object is also populated with the content although it is not
-            returned from the service.
-        :rtype: :class:`~azure.storage.queue.models.QueueMessage`
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('content', content)
-        request = HTTPRequest()
-        request.method = 'POST'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'messagettl': _to_str(time_to_live),
-            'timeout': _int_to_str(timeout)
-        }
-
-        request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                    self.key_encryption_key))
-
-        message_list = self._perform_request(request, _convert_xml_to_queue_messages,
-                                             [self.decode_function, False,
-                                              None, None, content])
-        return message_list[0]
-
-    def get_messages(self, queue_name, num_messages=None,
-                     visibility_timeout=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message 
-        content and a pop_receipt value, which is required to delete the message. 
-        The message is not automatically deleted from the queue, but after it has 
-        been retrieved, it is not visible to other clients for the time interval 
-        specified by the visibility_timeout parameter.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to get messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds, relative
-            to server time. The new value must be larger than or equal to 1
-            second, and cannot be larger than 7 days. The visibility timeout of 
-            a message can be set to a value later than the expiry time.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'numofmessages': _to_str(num_messages),
-            'visibilitytimeout': _to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def peek_messages(self, queue_name, num_messages=None, timeout=None):
-        '''
-        Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved 
-        for the first time with a call to get_messages, its dequeue_count property 
-        is set to 1. If it is not deleted and is subsequently retrieved again, the 
-        dequeue_count property is incremented. The client may use this value to 
-        determine how many times a message has been retrieved. Note that a call 
-        to peek_messages does not increment the value of dequeue_count, but returns 
-        this value for the client to read.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :param str queue_name:
-            The name of the queue to peek messages from.
-        :param int num_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that 
-            time_next_visible and pop_receipt will not be populated as peek does 
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-
-        _validate_decryption_required(self.require_encryption, self.key_encryption_key,
-                                      self.key_resolver_function)
-
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations(secondary=True)
-        request.path = _get_path(queue_name, True)
-        request.query = {
-            'peekonly': 'true',
-            'numofmessages': _to_str(num_messages),
-            'timeout': _int_to_str(timeout)
-        }
-
-        return self._perform_request(request, _convert_xml_to_queue_messages,
-                                     [self.decode_function, self.require_encryption,
-                                      self.key_encryption_key, self.key_resolver_function])
-
-    def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
-        '''
-        Deletes the specified message.
-
-        Normally after a client retrieves a message with the get_messages operation, 
-        the client is expected to process and delete the message. To delete the 
-        message, you must have two items of data: id and pop_receipt. The 
-        id is returned from the previous get_messages operation. The 
-        pop_receipt is returned from the most recent :func:`~get_messages` or 
-        :func:`~update_message` operation. In order for the delete_message operation 
-        to succeed, the pop_receipt specified on the request must match the 
-        pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` 
-        operation. 
-
-        :param str queue_name:
-            The name of the queue from which to delete the message.
-        :param str message_id:
-            The message id identifying the message to delete.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'timeout': _int_to_str(timeout)
-        }
-        self._perform_request(request)
-
-    def clear_messages(self, queue_name, timeout=None):
-        '''
-        Deletes all messages from the specified queue.
-
-        :param str queue_name:
-            The name of the queue whose messages to clear.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        '''
-        _validate_not_none('queue_name', queue_name)
-        request = HTTPRequest()
-        request.method = 'DELETE'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True)
-        request.query = {'timeout': _int_to_str(timeout)}
-        self._perform_request(request)
-
-    def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout,
-                       content=None, timeout=None):
-        '''
-        Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a 
-        queue message. This functionality can be useful if you want a worker role 
-        to "lease" a queue message. For example, if a worker role calls get_messages 
-        and recognizes that it needs more time to process a message, it can 
-        continually extend the message's invisibility until it is processed. If 
-        the worker role were to fail during processing, eventually the message 
-        would become visible again and another worker role could process it.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param str queue_name:
-            The name of the queue containing the message to update.
-        :param str message_id:
-            The message id identifying the message to update.
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~get_messages` or :func:`~update_message` operation.
-        :param int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function 
-            set on the service. Default is str.
-        :param int timeout:
-            The server timeout, expressed in seconds.
-        :return: 
-            A list of :class:`~azure.storage.queue.models.QueueMessage` objects. For convenience,
-            these objects are also populated with the content, although it is not returned by the service.
-        :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`)
-        '''
-
-        _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
-        _validate_not_none('queue_name', queue_name)
-        _validate_not_none('message_id', message_id)
-        _validate_not_none('pop_receipt', pop_receipt)
-        _validate_not_none('visibility_timeout', visibility_timeout)
-        request = HTTPRequest()
-        request.method = 'PUT'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(queue_name, True, message_id)
-        request.query = {
-            'popreceipt': _to_str(pop_receipt),
-            'visibilitytimeout': _int_to_str(visibility_timeout),
-            'timeout': _int_to_str(timeout)
-        }
-
-        if content is not None:
-            request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
-                                                                        self.key_encryption_key))
-
-        return self._perform_request(request, _parse_queue_message_from_headers)
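The track1 queue API removed above follows a get/process/delete pattern: a message retrieved with get_messages stays invisible for visibility_timeout seconds and is only removed once delete_message is called with its id and matching pop_receipt. A minimal sketch of that loop, assuming the legacy azure-storage-queue (track1) package, which exposes the same QueueService surface; the account name, key, and queue name are placeholders:

    from azure.storage.queue import QueueService

    service = QueueService(account_name='myaccount', account_key='<key>')

    # Retrieved messages become invisible for visibility_timeout seconds;
    # they reappear for other consumers unless deleted before it lapses.
    for msg in service.get_messages('tasks', num_messages=16, visibility_timeout=30):
        print('processing', msg.id, 'dequeued', msg.dequeue_count, 'times')
        # Deletion needs both the message id and the matching pop_receipt.
        service.delete_message('tasks', msg.id, msg.pop_receipt)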
diff -pruN 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/sharedaccesssignature.py 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/sharedaccesssignature.py
--- 1.4.0-1/azure/multiapi/storage/v2018_11_09/queue/sharedaccesssignature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storage/v2018_11_09/queue/sharedaccesssignature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,117 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ..common.sharedaccesssignature import (
-    SharedAccessSignature,
-    _SharedAccessHelper,
-    _QueryStringConstants,
-    _sign_string,
-)
-from ._constants import X_MS_VERSION
-
-
-class QueueSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating queue shared access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key to generate the shared access signatures.
-        '''
-        super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_queue(self, queue_name, permission=None,
-                       expiry=None, start=None, id=None,
-                       ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-
-        :param str queue_name:
-            Name of queue.
-        :param QueuePermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, add, update, process.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_queue_acl.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with this SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _QueueSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(id)
-        sas.add_resource_signature(self.account_name, self.account_key, queue_name)
-
-        return sas.get_token()
-
-
-class _QueueSharedAccessHelper(_SharedAccessHelper):
-    def __init__(self):
-        super(_QueueSharedAccessHelper, self).__init__()
-
-    def add_resource_signature(self, account_name, account_key, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/queue/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(_QueryStringConstants.SIGNED_START) +
-             get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(_QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
-                        _sign_string(account_key, string_to_sign))
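The signature appended by add_resource_signature above is an HMAC-SHA256 over a newline-joined string of the SAS fields, keyed with the base64-decoded account key. A standalone sketch of that signing step using only the standard library; the key, expiry, and resource values are placeholders, and the field order mirrors add_resource_signature:

    import base64
    import hashlib
    import hmac

    def sign(account_key, string_to_sign):
        # base64(HMAC-SHA256(base64decode(key), UTF-8(string_to_sign)))
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    string_to_sign = '\n'.join([
        'rp',                        # signed permissions (read, process)
        '',                          # signed start (omitted)
        '2019-01-01T00:00:00Z',      # signed expiry
        '/queue/myaccount/myqueue',  # canonicalized resource
        '',                          # signed identifier (stored access policy)
        '',                          # signed IP range
        'https',                     # signed protocol
        '2018-03-28',                # signed version
    ])
    print(sign('bXlrZXk=', string_to_sign))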
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/__init__.py	2025-06-18 05:27:42.000000000 +0000
@@ -1 +1 @@
-﻿__import__('pkg_resources').declare_namespace(__name__)
+﻿
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,210 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-
-from typing import Union, Iterable, AnyStr, IO, Any, Dict  # pylint: disable=unused-import
-from ._version import VERSION
-from ._blob_client import BlobClient
-from ._container_client import ContainerClient
-from ._blob_service_client import BlobServiceClient
-from ._lease import BlobLeaseClient
-from ._download import StorageStreamDownloader
-from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas
-from ._shared.policies import ExponentialRetry, LinearRetry
-from ._shared.response_handlers import PartialBatchErrorException
-from ._shared.models import (
-    LocationMode,
-    ResourceTypes,
-    AccountSasPermissions,
-    StorageErrorCode,
-    UserDelegationKey
-)
-from ._generated.models import (
-    RehydratePriority
-)
-from ._models import (
-    BlobType,
-    BlockState,
-    StandardBlobTier,
-    PremiumPageBlobTier,
-    SequenceNumberAction,
-    PublicAccess,
-    BlobAnalyticsLogging,
-    Metrics,
-    RetentionPolicy,
-    StaticWebsite,
-    CorsRule,
-    ContainerProperties,
-    BlobProperties,
-    LeaseProperties,
-    ContentSettings,
-    CopyProperties,
-    BlobBlock,
-    PageRange,
-    AccessPolicy,
-    ContainerSasPermissions,
-    BlobSasPermissions,
-    CustomerProvidedEncryptionKey,
-    ContainerEncryptionScope
-)
-
-__version__ = VERSION
-
-
-def upload_blob_to_url(
-        blob_url,  # type: str
-        data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-        credential=None,  # type: Any
-        **kwargs):
-    # type: (...) -> Dict[str, Any]
-    """Upload data to a given URL
-
-    The data will be uploaded as a block blob.
-
-    :param str blob_url:
-        The full URI to the blob. This can also include a SAS token.
-    :param data:
-        The data to upload. This can be bytes, text, an iterable or a file-like object.
-    :type data: bytes or str or Iterable
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        blob URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword bool overwrite:
-        Whether the blob to be uploaded should overwrite the current data.
-        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
-        operation will fail with a ResourceExistsError.
-    :keyword int max_concurrency:
-        The number of parallel connections with which to upload.
-    :keyword int length:
-        Number of bytes to read from the stream. This is optional, but
-        should be supplied for optimal performance.
-    :keyword dict(str,str) metadata:
-        Name-value pairs associated with the blob as metadata.
-    :keyword bool validate_content:
-        If true, calculates an MD5 hash for each chunk of the blob. The storage
-        service checks the hash of the content that has arrived with the hash
-        that was sent. This is primarily valuable for detecting bitflips on
-        the wire if using http instead of https, as https (the default), will
-        already validate. Note that this MD5 hash is not stored with the
-        blob. Also note that if enabled, the memory-efficient upload algorithm
-        will not be used, because computing the MD5 hash requires buffering
-        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-    :keyword str encoding:
-        Encoding to use if text is supplied as input. Defaults to UTF-8.
-    :returns: Blob-updated property dict (Etag and last modified)
-    :rtype: dict(str, Any)
-    """
-    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
-        return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
-
-
-def _download_to_stream(client, handle, **kwargs):
-    """Download data to specified open file-handle."""
-    stream = client.download_blob(**kwargs)
-    stream.readinto(handle)
-
-
-def download_blob_from_url(
-        blob_url,  # type: str
-        output,  # type: str
-        credential=None,  # type: Any
-        **kwargs):
-    # type: (...) -> None
-    """Download the contents of a blob to a local file or stream.
-
-    :param str blob_url:
-        The full URI to the blob. This can also include a SAS token.
-    :param output:
-        Where the data should be downloaded to. This could be either a file path to write to,
-        or an open IO handle to write to.
-    :type output: str or writable stream.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
-        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword bool overwrite:
-        Whether the local file should be overwritten if it already exists. The default value is
-        `False` - in which case a ValueError will be raised if the file already exists. If set to
-        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
-        in, this value is ignored.
-    :keyword int max_concurrency:
-        The number of parallel connections with which to download.
-    :keyword int offset:
-        Start of byte range to use for downloading a section of the blob.
-        Must be set if length is provided.
-    :keyword int length:
-        Number of bytes to read from the stream. This is optional, but
-        should be supplied for optimal performance.
-    :keyword bool validate_content:
-        If true, calculates an MD5 hash for each chunk of the blob. The storage
-        service checks the hash of the content that has arrived with the hash
-        that was sent. This is primarily valuable for detecting bitflips on
-        the wire if using http instead of https, as https (the default), will
-        already validate. Note that this MD5 hash is not stored with the
-        blob. Also note that if enabled, the memory-efficient upload algorithm
-        will not be used, because computing the MD5 hash requires buffering
-        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-    :rtype: None
-    """
-    overwrite = kwargs.pop('overwrite', False)
-    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
-        if hasattr(output, 'write'):
-            _download_to_stream(client, output, **kwargs)
-        else:
-            if not overwrite and os.path.isfile(output):
-                raise ValueError("The file '{}' already exists.".format(output))
-            with open(output, 'wb') as file_handle:
-                _download_to_stream(client, file_handle, **kwargs)
-
-
-__all__ = [
-    'upload_blob_to_url',
-    'download_blob_from_url',
-    'BlobServiceClient',
-    'ContainerClient',
-    'BlobClient',
-    'BlobType',
-    'BlobLeaseClient',
-    'StorageErrorCode',
-    'UserDelegationKey',
-    'ExponentialRetry',
-    'LinearRetry',
-    'LocationMode',
-    'BlockState',
-    'StandardBlobTier',
-    'PremiumPageBlobTier',
-    'SequenceNumberAction',
-    'PublicAccess',
-    'BlobAnalyticsLogging',
-    'Metrics',
-    'RetentionPolicy',
-    'StaticWebsite',
-    'CorsRule',
-    'ContainerProperties',
-    'BlobProperties',
-    'LeaseProperties',
-    'ContentSettings',
-    'CopyProperties',
-    'BlobBlock',
-    'PageRange',
-    'AccessPolicy',
-    'ContainerSasPermissions',
-    'BlobSasPermissions',
-    'ResourceTypes',
-    'AccountSasPermissions',
-    'StorageStreamDownloader',
-    'CustomerProvidedEncryptionKey',
-    'RehydratePriority',
-    'generate_account_sas',
-    'generate_container_sas',
-    'generate_blob_sas',
-    'PartialBatchErrorException',
-    'ContainerEncryptionScope'
-]
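The removed __init__.py above exported two convenience wrappers, upload_blob_to_url and download_blob_from_url, which open a short-lived BlobClient around a single transfer. A minimal usage sketch against the 1.4.0 import path (the URL, SAS token, and file name are placeholders):

    from azure.multiapi.storagev2.blob.v2019_07_07 import (
        download_blob_from_url,
        upload_blob_to_url,
    )

    sas_url = 'https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>'
    # Block-blob upload; fails with ResourceExistsError unless overwrite=True.
    upload_blob_to_url(sas_url, b'hello world', overwrite=True)
    # Round-trip the same blob to a local file.
    download_blob_from_url(sas_url, 'hello_copy.txt', overwrite=True)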
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_client.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_client.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,3008 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-many-lines,no-self-use
-
-from io import BytesIO
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TYPE_CHECKING
-)
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-from azure.core.tracing.decorator import distributed_trace
-
-from ._shared import encode_base64
-from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
-from ._shared.encryption import generate_blob_encryption_data
-from ._shared.uploads import IterStreamer
-from ._shared.request_handlers import (
-    add_metadata_headers, get_length, read_length,
-    validate_and_format_range_headers)
-from ._shared.response_handlers import return_response_headers, process_storage_error
-from ._generated import AzureBlobStorage, VERSION
-from ._generated.models import ( # pylint: disable=unused-import
-    DeleteSnapshotsOptionType,
-    BlobHTTPHeaders,
-    BlockLookupList,
-    AppendPositionAccessConditions,
-    SequenceNumberAccessConditions,
-    StorageErrorException,
-    UserDelegationKey,
-    CpkInfo)
-from ._serialize import get_modify_conditions, get_source_conditions, get_cpk_scope_info, get_api_version
-from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream
-from ._upload_helpers import (
-    upload_block_blob,
-    upload_append_blob,
-    upload_page_blob)
-from ._models import BlobType, BlobBlock
-from ._download import StorageStreamDownloader
-from ._lease import BlobLeaseClient, get_access_conditions
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from ._generated.models import BlockList
-    from ._models import (  # pylint: disable=unused-import
-        ContainerProperties,
-        BlobProperties,
-        BlobSasPermissions,
-        ContentSettings,
-        PremiumPageBlobTier,
-        StandardBlobTier,
-        SequenceNumberAction
-    )
-
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
-    'The require_encryption flag is set, but encryption is not supported'
-    ' for this method.')
-
-
-class BlobClient(StorageAccountHostsMixin):  # pylint: disable=too-many-public-methods
-    """A client to interact with a specific blob, although that blob may not yet exist.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the blob,
-        use the :func:`from_blob_url` classmethod.
-    :param container_name: The container name for the blob.
-    :type container_name: str
-    :param blob_name: The name of the blob with which to interact. If specified, this value will override
-        a blob value specified in the blob URL.
-    :type blob_name: str
-    :param str snapshot:
-        The optional blob snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`create_snapshot`.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
-        anything beyond that is downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_authentication.py
-            :start-after: [START create_blob_client]
-            :end-before: [END create_blob_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
-
-        .. literalinclude:: ../samples/blob_samples_authentication.py
-            :start-after: [START create_blob_client_sas_url]
-            :end-before: [END create_blob_client_sas_url]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobClient from a SAS URL to a blob.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            container_name,  # type: str
-            blob_name,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-
-        if not (container_name and blob_name):
-            raise ValueError("Please specify a container name and blob name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        path_snapshot, sas_token = parse_query(parsed_url.query)
-
-        self.container_name = container_name
-        self.blob_name = blob_name
-        try:
-            self.snapshot = snapshot.snapshot # type: ignore
-        except AttributeError:
-            try:
-                self.snapshot = snapshot['snapshot'] # type: ignore
-            except TypeError:
-                self.snapshot = snapshot or path_snapshot
-
-        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
-        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
-        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        container_name = self.container_name
-        if isinstance(container_name, six.text_type):
-            container_name = container_name.encode('UTF-8')
-        return "{}://{}/{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(container_name),
-            quote(self.blob_name, safe='~/'),
-            self._query_str)
-
-    @classmethod
-    def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs):
-        # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient
-        """Create BlobClient from a blob url.
-
-        :param str blob_url:
-            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
-            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
-        :type blob_url: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :param str snapshot:
-            The optional blob snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`. If specified, this will override
-            the snapshot in the url.
-        :returns: A Blob client.
-        :rtype: ~azure.storage.blob.BlobClient
-        """
-        try:
-            if not blob_url.lower().startswith('http'):
-                blob_url = "https://" + blob_url
-        except AttributeError:
-            raise ValueError("Blob URL must be a string.")
-        parsed_url = urlparse(blob_url.rstrip('/'))
-
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(blob_url))
-
-        path_blob = parsed_url.path.lstrip('/').split('/')
-        account_path = ""
-        if len(path_blob) > 2:
-            account_path = "/" + "/".join(path_blob[:-2])
-        account_url = "{}://{}{}?{}".format(
-            parsed_url.scheme,
-            parsed_url.netloc.rstrip('/'),
-            account_path,
-            parsed_url.query)
-        container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1])
-        if not container_name or not blob_name:
-            raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.")
-
-        path_snapshot, _ = parse_query(parsed_url.query)
-        if snapshot:
-            try:
-                path_snapshot = snapshot.snapshot # type: ignore
-            except AttributeError:
-                try:
-                    path_snapshot = snapshot['snapshot'] # type: ignore
-                except TypeError:
-                    path_snapshot = snapshot
-
-        return cls(
-            account_url, container_name=container_name, blob_name=blob_name,
-            snapshot=path_snapshot, credential=credential, **kwargs
-        )
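# Illustration of the decomposition performed by from_blob_url above for a
# flat blob URL (placeholder values): the query string, including any SAS
# token, is carried onto the account URL, and the last two path segments
# become the container and blob names.
#
#   blob_url  = 'https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>'
#   account_url    -> 'https://myaccount.blob.core.windows.net?<sas>'
#   container_name -> 'mycontainer'
#   blob_name      -> 'hello.txt'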
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            container_name,  # type: str
-            blob_name,  # type: str
-            snapshot=None,  # type: Optional[str]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> BlobClient
-        """Create BlobClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param container_name: The container name for the blob.
-        :type container_name: str
-        :param blob_name: The name of the blob with which to interact.
-        :type blob_name: str
-        :param str snapshot:
-            The optional blob snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`.
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :returns: A Blob client.
-        :rtype: ~azure.storage.blob.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_authentication.py
-                :start-after: [START auth_from_connection_string_blob]
-                :end-before: [END auth_from_connection_string_blob]
-                :language: python
-                :dedent: 8
-                :caption: Creating the BlobClient from a connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, container_name=container_name, blob_name=blob_name,
-            snapshot=snapshot, credential=credential, **kwargs
-        )
-
-    @distributed_trace
-    def get_account_information(self, **kwargs):
-        # type: (**Any) -> Dict[str, str]
-        """Gets information related to the storage account in which the blob resides.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict(str, str)
-        """
-        try:
-            return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _upload_blob_options(  # pylint:disable=too-many-statements
-            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
-            length=None,  # type: Optional[int]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption and not self.key_encryption_key:
-            raise ValueError("Encryption required but no key was provided.")
-        encryption_options = {
-            'required': self.require_encryption,
-            'key': self.key_encryption_key,
-            'resolver': self.key_resolver_function,
-        }
-        if self.key_encryption_key is not None:
-            cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key)
-            encryption_options['cek'] = cek
-            encryption_options['vector'] = iv
-            encryption_options['data'] = encryption_data
-
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding) # type: ignore
-        if length is None:
-            length = get_length(data)
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        if isinstance(data, bytes):
-            stream = BytesIO(data)
-        elif hasattr(data, 'read'):
-            stream = data
-        elif hasattr(data, '__iter__'):
-            stream = IterStreamer(data, encoding=encoding)
-        else:
-            raise TypeError("Unsupported data type: {}".format(type(data)))
-
-        validate_content = kwargs.pop('validate_content', False)
-        content_settings = kwargs.pop('content_settings', None)
-        overwrite = kwargs.pop('overwrite', False)
-        max_concurrency = kwargs.pop('max_concurrency', 1)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        kwargs['cpk_info'] = cpk_info
-
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
-        kwargs['modified_access_conditions'] = get_modify_conditions(kwargs)
-        kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs)
-        if content_settings:
-            kwargs['blob_headers'] = BlobHTTPHeaders(
-                blob_cache_control=content_settings.cache_control,
-                blob_content_type=content_settings.content_type,
-                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                blob_content_encoding=content_settings.content_encoding,
-                blob_content_language=content_settings.content_language,
-                blob_content_disposition=content_settings.content_disposition
-            )
-        kwargs['stream'] = stream
-        kwargs['length'] = length
-        kwargs['overwrite'] = overwrite
-        kwargs['headers'] = headers
-        kwargs['validate_content'] = validate_content
-        kwargs['blob_settings'] = self._config
-        kwargs['max_concurrency'] = max_concurrency
-        kwargs['encryption_options'] = encryption_options
-        if blob_type == BlobType.BlockBlob:
-            kwargs['client'] = self._client.block_blob
-            kwargs['data'] = data
-        elif blob_type == BlobType.PageBlob:
-            kwargs['client'] = self._client.page_blob
-        elif blob_type == BlobType.AppendBlob:
-            if self.require_encryption or (self.key_encryption_key is not None):
-                raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-            kwargs['client'] = self._client.append_blob
-        else:
-            raise ValueError("Unsupported BlobType: {}".format(blob_type))
-        return kwargs
-
-    @distributed_trace
-    def upload_blob(  # pylint: disable=too-many-locals
-            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
-            length=None,  # type: Optional[int]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Any
-        """Creates a new blob from a data source with automatic chunking.
-
-        :param data: The blob data to upload.
-        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
-            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
-            If True, upload_blob will overwrite the existing data. If set to False, the
-            operation will fail with ResourceExistsError. The exception to the above is with Append
-            blob types: if set to False and the data already exists, an error will not be raised
-            and the data will be appended to the existing blob. If overwrite is set to True, then the existing
-            append blob will be deleted, and a new one created. Defaults to False.
-        :keyword ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default), will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, upload_blob only succeeds if the
-            blob's lease is active and matches this ID. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: Blob-updated property dict (Etag and last modified)
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world.py
-                :start-after: [START upload_a_blob]
-                :end-before: [END upload_a_blob]
-                :language: python
-                :dedent: 12
-                :caption: Upload a blob to the container.
-        """
-        options = self._upload_blob_options(
-            data,
-            blob_type=blob_type,
-            length=length,
-            metadata=metadata,
-            **kwargs)
-        if blob_type == BlobType.BlockBlob:
-            return upload_block_blob(**options)
-        if blob_type == BlobType.PageBlob:
-            return upload_page_blob(**options)
-        return upload_append_blob(**options)
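# Hedged usage sketch for upload_blob as defined above; the account URL,
# credential, and names are placeholders rather than values from this package:
#
#   from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient
#
#   client = BlobClient('https://myaccount.blob.core.windows.net',
#                       container_name='mycontainer', blob_name='hello.txt',
#                       credential='<account-key-or-sas>')
#   client.upload_blob(b'hello world', overwrite=True)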
-
-    def _download_blob_options(self, offset=None, length=None, **kwargs):
-        # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any]
-        if self.require_encryption and not self.key_encryption_key:
-            raise ValueError("Encryption required but no key was provided.")
-        if length is not None and offset is None:
-            raise ValueError("Offset value must not be None if length is set.")
-        if length is not None:
-            length = offset + length - 1  # Service actually uses an end-range inclusive index
-
-        validate_content = kwargs.pop('validate_content', False)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'clients': self._client,
-            'config': self._config,
-            'start_range': offset,
-            'end_range': length,
-            'validate_content': validate_content,
-            'encryption_options': {
-                'required': self.require_encryption,
-                'key': self.key_encryption_key,
-                'resolver': self.key_resolver_function},
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_info': cpk_info,
-            'cls': deserialize_blob_stream,
-            'max_concurrency':kwargs.pop('max_concurrency', 1),
-            'encoding': kwargs.pop('encoding', None),
-            'timeout': kwargs.pop('timeout', None),
-            'name': self.blob_name,
-            'container': self.container_name}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def download_blob(self, offset=None, length=None, **kwargs):
-        # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader
-        """Downloads a blob to the StorageStreamDownloader. The readall() method must
-        be used to read all the content or readinto() must be used to download the blob into
-        a stream.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the blob.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default), will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, download_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword str encoding:
-            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: A streaming object (StorageStreamDownloader)
-        :rtype: ~azure.storage.blob.StorageStreamDownloader
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world.py
-                :start-after: [START download_a_blob]
-                :end-before: [END download_a_blob]
-                :language: python
-                :dedent: 12
-                :caption: Download a blob.
-        """
-        options = self._download_blob_options(
-            offset=offset,
-            length=length,
-            **kwargs)
-        return StorageStreamDownloader(**options)
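# Hedged usage sketch for download_blob: the StorageStreamDownloader it
# returns is consumed with readall() or readinto() (same placeholder client
# as in the upload sketch above):
#
#   downloader = client.download_blob(offset=0, length=512)
#   first_512_bytes = downloader.readall()
#
#   with open('hello_copy.bin', 'wb') as handle:
#       client.download_blob().readinto(handle)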
-
-    @staticmethod
-    def _generic_delete_blob_options(delete_snapshots=False, **kwargs):
-        # type: (bool, **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if delete_snapshots:
-            delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots)
-        options = {
-            'timeout': kwargs.pop('timeout', None),
-            'delete_snapshots': delete_snapshots or None,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions}
-        options.update(kwargs)
-        return options
-
-    def _delete_blob_options(self, delete_snapshots=False, **kwargs):
-        # type: (bool, **Any) -> Dict[str, Any]
-        if self.snapshot and delete_snapshots:
-            raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.")
-        options = self._generic_delete_blob_options(delete_snapshots, **kwargs)
-        options['snapshot'] = self.snapshot
-        return options
-
-    @distributed_trace
-    def delete_blob(self, delete_snapshots=False, **kwargs):
-        # type: (bool, **Any) -> None
-        """Marks the specified blob for deletion.
-
-        The blob is later deleted during garbage collection.
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the delete_blob()
-        operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
-        and retains the blob for a specified number of days.
-        After the specified number of days, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()`
-        by specifying the `include=['deleted']` option, and can be restored using the
-        :func:`undelete_blob` operation.
-
-        :param str delete_snapshots:
-            Required if the blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, delete_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world.py
-                :start-after: [START delete_blob]
-                :end-before: [END delete_blob]
-                :language: python
-                :dedent: 12
-                :caption: Delete a blob.
-        """
-        options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
-        try:
-            self._client.blob.delete(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
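-
-    # Usage sketch (illustrative, not from the original source): delete a blob
-    # together with its snapshots; assumes `blob_client` targets a blob that
-    # has snapshots (deleting it without "include" or "only" would fail).
-    #
-    #     blob_client.delete_blob(delete_snapshots="include")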
-
-    @distributed_trace
-    def undelete_blob(self, **kwargs):
-        # type: (**Any) -> None
-        """Restores soft-deleted blobs or snapshots.
-
-        The operation will only be successful if used within the specified
-        number of days set in the delete retention policy.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START undelete_blob]
-                :end-before: [END undelete_blob]
-                :language: python
-                :dedent: 8
-                :caption: Undeleting a blob.
-        """
-        try:
-            self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
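-
-    # Usage sketch (illustrative): restore a soft-deleted blob while the delete
-    # retention window is still open; `blob_client` is assumed to exist.
-    #
-    #     blob_client.undelete_blob()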
-
-    @distributed_trace
-    def get_blob_properties(self, **kwargs):
-        # type: (**Any) -> BlobProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: BlobProperties
-        :rtype: ~azure.storage.blob.BlobProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a blob.
-        """
-        # TODO: extract this out as _get_blob_properties_options
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        try:
-            blob_props = self._client.blob.get_properties(
-                timeout=kwargs.pop('timeout', None),
-                snapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=deserialize_blob_properties,
-                cpk_info=cpk_info,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        blob_props.name = self.blob_name
-        blob_props.container = self.container_name
-        return blob_props # type: ignore
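-
-    # Usage sketch (illustrative): read standard properties; `blob_client` is
-    # an assumed BlobClient for a live blob.
-    #
-    #     props = blob_client.get_blob_properties()
-    #     print(props.name, props.size, props.last_modified)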
-
-    def _set_http_headers_options(self, content_settings=None, **kwargs):
-        # type: (Optional[ContentSettings], **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        blob_headers = None
-        if content_settings:
-            blob_headers = BlobHTTPHeaders(
-                blob_cache_control=content_settings.cache_control,
-                blob_content_type=content_settings.content_type,
-                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                blob_content_encoding=content_settings.content_encoding,
-                blob_content_language=content_settings.content_language,
-                blob_content_disposition=content_settings.content_disposition
-            )
-        options = {
-            'timeout': kwargs.pop('timeout', None),
-            'blob_http_headers': blob_headers,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def set_http_headers(self, content_settings=None, **kwargs):
-        # type: (Optional[ContentSettings], **Any) -> Dict[str, Any]
-        """Sets system properties on the blob.
-
-        If one property is set in content_settings, all properties will be overridden.
-
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified)
-        :rtype: Dict[str, Any]
-        """
-        options = self._set_http_headers_options(content_settings=content_settings, **kwargs)
-        try:
-            return self._client.blob.set_http_headers(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
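-
-    # Usage sketch (illustrative): because a partial ContentSettings overrides
-    # every content property, re-supply any values you want to keep. Assumes
-    # `blob_client` and that ContentSettings is imported from this package.
-    #
-    #     settings = ContentSettings(content_type="application/json",
-    #                                cache_control="max-age=3600")
-    #     blob_client.set_http_headers(content_settings=settings)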
-
-    def _set_blob_metadata_options(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        options = {
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-            'headers': headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def set_blob_metadata(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
-        """Sets user-defined metadata for the blob as one or more name-value pairs.
-
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified)
-        :rtype: dict(str, Any)
-        """
-        options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
-        try:
-            return self._client.blob.set_metadata(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
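-
-    # Usage sketch (illustrative): each call replaces all existing metadata, so
-    # merge with the current values first when adding a single key; assumes
-    # `blob_client`.
-    #
-    #     current = blob_client.get_blob_properties().metadata or {}
-    #     current["reviewed"] = "true"
-    #     blob_client.set_blob_metadata(metadata=current)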
-
-    def _create_page_blob_options(  # type: ignore
-            self, size,  # type: int
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None, # type: Optional[Dict[str, str]]
-            premium_page_blob_tier=None,  # type: Optional[Union[str, PremiumPageBlobTier]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        blob_headers = None
-        if content_settings:
-            blob_headers = BlobHTTPHeaders(
-                blob_cache_control=content_settings.cache_control,
-                blob_content_type=content_settings.content_type,
-                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                blob_content_encoding=content_settings.content_encoding,
-                blob_content_language=content_settings.content_language,
-                blob_content_disposition=content_settings.content_disposition
-            )
-
-        sequence_number = kwargs.pop('sequence_number', None)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        if premium_page_blob_tier:
-            try:
-                headers['x-ms-access-tier'] = premium_page_blob_tier.value  # type: ignore
-            except AttributeError:
-                headers['x-ms-access-tier'] = premium_page_blob_tier  # type: ignore
-        options = {
-            'content_length': 0,
-            'blob_content_length': size,
-            'blob_sequence_number': sequence_number,
-            'blob_http_headers': blob_headers,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-            'headers': headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def create_page_blob(  # type: ignore
-            self, size,  # type: int
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None, # type: Optional[Dict[str, str]]
-            premium_page_blob_tier=None,  # type: Optional[Union[str, PremiumPageBlobTier]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Creates a new Page Blob of the specified size.
-
-        :param int size:
-            This specifies the maximum size for the page blob, up to 1 TB.
-            The page blob size must be aligned to a 512-byte boundary.
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword int sequence_number:
-            Only for Page blobs. The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict[str, Any]
-        """
-        options = self._create_page_blob_options(
-            size,
-            content_settings=content_settings,
-            metadata=metadata,
-            premium_page_blob_tier=premium_page_blob_tier,
-            **kwargs)
-        try:
-            return self._client.page_blob.create(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
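-
-    # Usage sketch (illustrative): page blob sizes must be 512-byte aligned;
-    # assumes `blob_client` targets a page blob name that does not exist yet.
-    #
-    #     blob_client.create_page_blob(size=512 * 1024)  # multiple of 512 bytes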
-
-    def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs):
-        # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        blob_headers = None
-        if content_settings:
-            blob_headers = BlobHTTPHeaders(
-                blob_cache_control=content_settings.cache_control,
-                blob_content_type=content_settings.content_type,
-                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                blob_content_encoding=content_settings.content_encoding,
-                blob_content_language=content_settings.content_language,
-                blob_content_disposition=content_settings.content_disposition
-            )
-
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'content_length': 0,
-            'blob_http_headers': blob_headers,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-            'headers': headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
-        # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
-        """Creates a new Append Blob.
-
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict[str, Any]
-        """
-        options = self._create_append_blob_options(
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return self._client.append_blob.create(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
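-
-    # Usage sketch (illustrative): create an empty append blob, then add data
-    # with separate append calls; `blob_client` is assumed.
-    #
-    #     blob_client.create_append_blob()
-    #     blob_client.append_block(b"log line 1\n")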
-
-    def _create_snapshot_options(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-            'headers': headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def create_snapshot(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
-        """Creates a snapshot of the blob.
-
-        A snapshot is a read-only version of a blob that's taken at a point in time.
-        It can be read, copied, or deleted, but not modified. Snapshots provide a way
-        to back up a blob as it appears at a moment in time.
-
-        A snapshot of a blob has the same name as the base blob from which the snapshot
-        is taken, with a DateTime value appended to indicate the time at which the
-        snapshot was taken.
-
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START create_blob_snapshot]
-                :end-before: [END create_blob_snapshot]
-                :language: python
-                :dedent: 8
-                :caption: Create a snapshot of the blob.
-        """
-        options = self._create_snapshot_options(metadata=metadata, **kwargs)
-        try:
-            return self._client.blob.create_snapshot(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
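-
-    # Usage sketch (illustrative): the returned dict carries the snapshot ID,
-    # which identifies the read-only copy; `blob_client` is assumed.
-    #
-    #     snap = blob_client.create_snapshot()
-    #     snapshot_id = snap["snapshot"]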
-
-    def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs):
-        # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any]
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        if 'source_lease' in kwargs:
-            source_lease = kwargs.pop('source_lease')
-            try:
-                headers['x-ms-source-lease-id'] = source_lease.id # type: str
-            except AttributeError:
-                headers['x-ms-source-lease-id'] = source_lease
-
-        tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None)
-
-        if kwargs.get('requires_sync'):
-            headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync'))
-
-        timeout = kwargs.pop('timeout', None)
-        dest_mod_conditions = get_modify_conditions(kwargs)
-        options = {
-            'copy_source': source_url,
-            'timeout': timeout,
-            'modified_access_conditions': dest_mod_conditions,
-            'headers': headers,
-            'cls': return_response_headers,
-        }
-        if not incremental_copy:
-            source_mod_conditions = get_source_conditions(kwargs)
-            dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None))
-            options['source_modified_access_conditions'] = source_mod_conditions
-            options['lease_access_conditions'] = dest_access_conditions
-            options['tier'] = tier.value if tier else None
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
-        # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]]
-        """Copies a blob asynchronously.
-
-        This operation returns a dictionary of copy properties that can be used
-        to check the status of, or to abort, the copy operation.
-        The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation may be a block blob, an append blob,
-        or a page blob. If the destination blob already exists, it must be of the
-        same blob type as the source blob. Any existing destination blob will be
-        overwritten. The destination blob cannot be modified while a copy operation
-        is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob's length, initially containing all zeroes. Then
-        the source page ranges are enumerated, and non-empty ranges are copied.
-
-        For a block blob or an append blob, the Blob service creates a committed
-        blob of zero length before returning from this operation. When copying
-        from a block blob, all committed blocks and their block IDs are copied.
-        Uncommitted blocks are not copied. At the end of the copy operation, the
-        destination blob will have the same committed block count as the source.
-
-        When copying from an append blob, all committed blocks are copied. At the
-        end of the copy operation, the destination blob will have the same committed
-        block count as the source.
-
-        For all blob types, the copy status can be checked by reading the ``copy``
-        attribute of the destination blob's properties, and an in-progress copy
-        can be cancelled with :func:`abort_copy`. The final blob will be committed
-        when the copy completes.
-
-        :param str source_url:
-            A URL of up to 2 KB in length that specifies a file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param bool incremental_copy:
-            Copies the snapshot of the source page blob to a destination page blob.
-            The snapshot is copied such that only the differential changes between
-            the previously copied snapshot and the current one are transferred to
-            the destination.
-            The copied snapshots are complete copies of the original snapshot and
-            can be read or copied from as usual. Defaults to False.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `source_match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword destination_lease:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword source_lease:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
-            Indicates the priority with which to rehydrate an archived blob.
-        :keyword bool requires_sync:
-            Enforces that the service will not return a response until the copy is complete.
-        :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START copy_blob_from_url]
-                :end-before: [END copy_blob_from_url]
-                :language: python
-                :dedent: 12
-                :caption: Copy a blob from a URL.
-        """
-        options = self._start_copy_from_url_options(
-            source_url,
-            metadata=metadata,
-            incremental_copy=incremental_copy,
-            **kwargs)
-        try:
-            if incremental_copy:
-                return self._client.page_blob.copy_incremental(**options)
-            return self._client.blob.start_copy_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
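-
-    # Usage sketch (illustrative): start a copy, then poll its status through
-    # the destination blob's properties; assumes `dest_client` is a BlobClient
-    # in the destination account and `src_url` is a public or SAS-authenticated
-    # source URL.
-    #
-    #     copy = dest_client.start_copy_from_url(src_url)
-    #     status = dest_client.get_blob_properties().copy.status  # e.g. "pending"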
-
-    def _abort_copy_options(self, copy_id, **kwargs):
-        # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        try:
-            copy_id = copy_id.copy.id
-        except AttributeError:
-            try:
-                copy_id = copy_id['copy_id']
-            except TypeError:
-                pass
-        options = {
-            'copy_id': copy_id,
-            'lease_access_conditions': access_conditions,
-            'timeout': kwargs.pop('timeout', None)}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def abort_copy(self, copy_id, **kwargs):
-        # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None
-        """Abort an ongoing copy operation.
-
-        This leaves the destination blob with zero length and full metadata, and
-        raises an error if the copy operation has already ended.
-
-        :param copy_id:
-            The copy operation to abort. This can be either an ID string, an
-            instance of BlobProperties, or a dict containing a 'copy_id' key as
-            returned by :func:`start_copy_from_url`.
-        :type copy_id: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START abort_copy_blob_from_url]
-                :end-before: [END abort_copy_blob_from_url]
-                :language: python
-                :dedent: 12
-                :caption: Abort copying a blob from URL.
-        """
-        options = self._abort_copy_options(copy_id, **kwargs)
-        try:
-            self._client.blob.abort_copy_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
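-
-    # Usage sketch (illustrative): abort using the copy_id from the dict that
-    # start_copy_from_url returned; this raises if the copy already finished.
-    #
-    #     copy = dest_client.start_copy_from_url(src_url)
-    #     dest_client.abort_copy(copy["copy_id"])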
-
-    @distributed_trace
-    def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
-        # type: (int, Optional[str], **Any) -> BlobLeaseClient
-        """Requests a new lease.
-
-        If the blob does not have an active lease, the Blob
-        Service creates a lease on the blob and returns a new lease.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The Blob Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A BlobLeaseClient object.
-        :rtype: ~azure.storage.blob.BlobLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START acquire_lease_on_blob]
-                :end-before: [END acquire_lease_on_blob]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on a blob.
-        """
-        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
-        lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
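-
-    # Usage sketch (illustrative): take a short lease, pass it to an operation
-    # that requires it, then release; `blob_client` is assumed.
-    #
-    #     lease = blob_client.acquire_lease(lease_duration=15)
-    #     blob_client.set_blob_metadata({"locked": "yes"}, lease=lease)
-    #     lease.release()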
-
-    @distributed_trace
-    def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
-        # type: (Union[str, StandardBlobTier], Any) -> None
-        """This operation sets the tier on a block blob.
-
-        A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param standard_blob_tier:
-            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
-            'Archive'. The hot tier is optimized for storing data that is accessed
-            frequently. The cool storage tier is optimized for storing data that
-            is infrequently accessed and stored for at least a month. The archive
-            tier is optimized for storing data that is rarely accessed and stored
-            for at least six months with flexible latency requirements.
-        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
-        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
-            Indicates the priority with which to rehydrate an archived blob.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :rtype: None
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if standard_blob_tier is None:
-            raise ValueError("A StandardBlobTier must be specified")
-        try:
-            self._client.blob.set_tier(
-                tier=standard_blob_tier,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
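-
-    # Usage sketch (illustrative): move a block blob to the Cool tier; assumes
-    # `blob_client` targets a block blob on a standard storage account.
-    #
-    #     blob_client.set_standard_blob_tier("Cool")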
-
-    def _stage_block_options(
-            self, block_id,  # type: str
-            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-        block_id = encode_base64(str(block_id))
-        if isinstance(data, six.text_type):
-            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if length is None:
-            length = get_length(data)
-            if length is None:
-                length, data = read_length(data)
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        validate_content = kwargs.pop('validate_content', False)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'block_id': block_id,
-            'content_length': length,
-            'body': data,
-            'transactional_content_md5': None,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'validate_content': validate_content,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-        }
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def stage_block(
-            self, block_id,  # type: str
-            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a new block to be committed as part of a blob.
-
-        :param str block_id: A valid Base64 string value that identifies the
-             block. Prior to encoding, the string must be less than or equal to 64
-             bytes in size. For a given blob, the length of the value specified for
-             the block_id parameter must be the same size for each block.
-        :param data: The blob data.
-        :param int length: Size of the block.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob property dict.
-        :rtype: dict[str, Any]
-        """
-        options = self._stage_block_options(
-            block_id,
-            data,
-            length=length,
-            **kwargs)
-        try:
-            return self._client.block_blob.stage_block(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
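-
-    # Usage sketch (illustrative): stage two blocks and commit them in order.
-    # Block IDs must all be the same length before encoding; BlobBlock and
-    # commit_block_list belong to the same package/client and are assumed
-    # available here.
-    #
-    #     blob_client.stage_block("block-001", b"hello ")
-    #     blob_client.stage_block("block-002", b"world")
-    #     blob_client.commit_block_list(
-    #         [BlobBlock("block-001"), BlobBlock("block-002")])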
-
-    def _stage_block_from_url_options(
-            self, block_id,  # type: str
-            source_url,  # type: str
-            source_offset=None,  # type: Optional[int]
-            source_length=None,  # type: Optional[int]
-            source_content_md5=None,  # type: Optional[Union[bytes, bytearray]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if source_length is not None and source_offset is None:
-            raise ValueError("Source offset value must not be None if length is set.")
-        if source_length is not None:
-            source_length = source_offset + source_length - 1
-        block_id = encode_base64(str(block_id))
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        range_header = None
-        if source_offset is not None:
-            range_header, _ = validate_and_format_range_headers(source_offset, source_length)
-
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        options = {
-            'block_id': block_id,
-            'content_length': 0,
-            'source_url': source_url,
-            'source_range': range_header,
-            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-        }
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def stage_block_from_url(
-            self, block_id,  # type: str
-            source_url,  # type: str
-            source_offset=None,  # type: Optional[int]
-            source_length=None,  # type: Optional[int]
-            source_content_md5=None,  # type: Optional[Union[bytes, bytearray]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a new block to be committed as part of a blob where
-        the contents are read from a URL.
-
-        :param str block_id: A valid Base64 string value that identifies the
-             block. Prior to encoding, the string must be less than or equal to 64
-             bytes in size. For a given blob, the length of the value specified for
-             the block_id parameter must be the same length for each block.
-        :param str source_url:
-            The URL of the source data. It can point to any Azure Blob or File
-            that is either public or has a shared access signature attached.
-        :param int source_offset:
-            Start of byte range to use for the block.
-            Must be set if source length is provided.
-        :param int source_length: The size of the block in bytes.
-        :param bytearray source_content_md5:
-            Specify the md5 calculated for the range of
-            bytes that must be read from the copy source.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob property dict.
-        :rtype: dict[str, Any]
-        """
-        options = self._stage_block_from_url_options(
-            block_id,
-            source_url,
-            source_offset=source_offset,
-            source_length=source_length,
-            source_content_md5=source_content_md5,
-            **kwargs)
-        try:
-            return self._client.block_blob.stage_block_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
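-
-    # Illustrative sketch (not part of the original module): staging a 1 MiB
-    # slice of a hypothetical source blob URL `src_url` (public or carrying a
-    # SAS) as a block of this blob. `source_offset` must be set whenever
-    # `source_length` is provided.
-    #
-    #     blob_client.stage_block_from_url(
-    #         'block-001', src_url,
-    #         source_offset=0, source_length=1024 * 1024)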
-
-    def _get_block_list_result(self, blocks):
-        # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]]
-        committed = [] # type: List
-        uncommitted = [] # type: List
-        if blocks.committed_blocks:
-            committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks]  # pylint: disable=protected-access
-        if blocks.uncommitted_blocks:
-            uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks]  # pylint: disable=protected-access
-        return committed, uncommitted
-
-    @distributed_trace
-    def get_block_list(self, block_list_type="committed", **kwargs):
-        # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
-        """The Get Block List operation retrieves the list of blocks that have
-        been uploaded as part of a block blob.
-
-        :param str block_list_type:
-            Specifies whether to return the list of committed
-            blocks, the list of uncommitted blocks, or both lists together.
-            Possible values include: 'committed', 'uncommitted', 'all'
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A tuple of two lists - committed and uncommitted blocks
-        :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        try:
-            blocks = self._client.block_blob.get_block_list(
-                list_type=block_list_type,
-                snapshot=self.snapshot,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return self._get_block_list_result(blocks)
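-
-    # Illustrative sketch (not part of the original module): inspecting the
-    # committed and uncommitted blocks of a hypothetical `blob_client`.
-    #
-    #     committed, uncommitted = blob_client.get_block_list('all')
-    #     committed_bytes = sum(block.size for block in committed)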
-
-    def _commit_block_list_options( # type: ignore
-            self, block_list,  # type: List[BlobBlock]
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
-        for block in block_list:
-            try:
-                if block.state.value == 'committed':
-                    block_lookup.committed.append(encode_base64(str(block.id)))
-                elif block.state.value == 'uncommitted':
-                    block_lookup.uncommitted.append(encode_base64(str(block.id)))
-                else:
-                    block_lookup.latest.append(encode_base64(str(block.id)))
-            except AttributeError:
-                block_lookup.latest.append(encode_base64(str(block)))
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        blob_headers = None
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if content_settings:
-            blob_headers = BlobHTTPHeaders(
-                blob_cache_control=content_settings.cache_control,
-                blob_content_type=content_settings.content_type,
-                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                blob_content_encoding=content_settings.content_encoding,
-                blob_content_language=content_settings.content_language,
-                blob_content_disposition=content_settings.content_disposition
-            )
-
-        validate_content = kwargs.pop('validate_content', False)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        tier = kwargs.pop('standard_blob_tier', None)
-
-        options = {
-            'blocks': block_lookup,
-            'blob_http_headers': blob_headers,
-            'lease_access_conditions': access_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'modified_access_conditions': mod_conditions,
-            'cls': return_response_headers,
-            'validate_content': validate_content,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'tier': tier.value if tier else None,
-            'headers': headers
-        }
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def commit_block_list( # type: ignore
-            self, block_list,  # type: List[BlobBlock]
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """The Commit Block List operation writes a blob by specifying the list of
-        block IDs that make up the blob.
-
-        :param list block_list:
-            List of ~azure.storage.blob.BlobBlock objects.
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict[str, str]
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block list content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https, as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._commit_block_list_options(
-            block_list,
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return self._client.block_blob.commit_block_list(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
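-
-    # Illustrative sketch (not part of the original module): committing the
-    # blocks staged earlier together with HTTP headers, reusing the
-    # hypothetical `block_ids` list from the stage_block sketch above
-    # (import path as referenced by the docstrings in this file).
-    #
-    #     from azure.storage.blob import ContentSettings
-    #     blob_client.commit_block_list(
-    #         block_ids,
-    #         content_settings=ContentSettings(content_type='application/json'))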
-
-    @distributed_trace
-    def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
-        # type: (Union[str, PremiumPageBlobTier], **Any) -> None
-        """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
-
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :rtype: None
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if premium_page_blob_tier is None:
-            raise ValueError("A PremiumPageBlobTier must be specified")
-        try:
-            self._client.blob.set_tier(
-                tier=premium_page_blob_tier,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
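-
-    # Illustrative sketch (not part of the original module): moving a page
-    # blob on a premium account to the P30 tier via a hypothetical
-    # `blob_client` (import path as referenced by the docstrings above).
-    #
-    #     from azure.storage.blob import PremiumPageBlobTier
-    #     blob_client.set_premium_page_blob_tier(PremiumPageBlobTier.P30)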
-
-    def _get_page_ranges_options( # type: ignore
-            self, offset=None, # type: Optional[int]
-            length=None, # type: Optional[int]
-            previous_snapshot_diff=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if length is not None and offset is None:
-            raise ValueError("Offset value must not be None if length is set.")
-        if length is not None:
-            length = offset + length - 1  # Reformat to an inclusive range index
-        page_range, _ = validate_and_format_range_headers(
-            offset, length, start_range_required=False, end_range_required=False, align_to_page=True
-        )
-        options = {
-            'snapshot': self.snapshot,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'range': page_range}
-        if previous_snapshot_diff:
-            try:
-                options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore
-            except AttributeError:
-                try:
-                    options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore
-                except TypeError:
-                    options['prevsnapshot'] = previous_snapshot_diff
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def get_page_ranges( # type: ignore
-            self, offset=None, # type: Optional[int]
-            length=None, # type: Optional[int]
-            previous_snapshot_diff=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            **kwargs
-        ):
-        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-        """Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param int offset:
-            Start of byte range to use for getting valid page ranges.
-            If no length is given, all bytes after the offset will be searched.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for getting valid page ranges.
-            If length is given, offset must be provided.
-            This range will return valid page ranges from the offset start up to
-            the specified length.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param previous_snapshot_diff:
-            The snapshot diff parameter that contains an opaque DateTime value that
-            specifies a previous blob snapshot to be compared
-            against a more recent snapshot or the current blob.
-        :type previous_snapshot_diff: str or dict[str, Any]
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first element is the list of filled page ranges; the second element
-            is the list of cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            previous_snapshot_diff=previous_snapshot_diff,
-            **kwargs)
-        try:
-            if previous_snapshot_diff:
-                ranges = self._client.page_blob.get_page_ranges_diff(**options)
-            else:
-                ranges = self._client.page_blob.get_page_ranges(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
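-
-    # Illustrative sketch (not part of the original module): listing the valid
-    # pages of a hypothetical page blob client and totalling the occupied
-    # bytes; each range dict carries inclusive 'start' and 'end' keys.
-    #
-    #     filled, cleared = blob_client.get_page_ranges()
-    #     used = sum(r['end'] - r['start'] + 1 for r in filled)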
-
-    @distributed_trace
-    def get_page_range_diff_for_managed_disk(
-            self, previous_snapshot_url,  # type: str
-            offset=None, # type: Optional[int]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-        """Returns the list of valid page ranges for a managed disk or snapshot.
-
-        .. note::
-            This operation is only available for managed disk accounts.
-
-        .. versionadded:: 12.2.0
-            This operation was introduced in API version '2019-07-07'.
-
-        :param str previous_snapshot_url:
-            Specifies the URL of a previous snapshot of the managed disk.
-            The response will only contain pages that were changed between the target blob and
-            its previous snapshot.
-        :param int offset:
-            Start of byte range to use for getting valid page ranges.
-            If no length is given, all bytes after the offset will be searched.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for getting valid page ranges.
-            If length is given, offset must be provided.
-            This range will return valid page ranges from the offset start up to
-            the specified length.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first element is the list of filled page ranges; the second element
-            is the list of cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            prev_snapshot_url=previous_snapshot_url,
-            **kwargs)
-        try:
-            ranges = self._client.page_blob.get_page_ranges_diff(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
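-
-    # Illustrative sketch (not part of the original module): diffing a managed
-    # disk against a hypothetical previous snapshot URL `snap_url` (requires
-    # API version 2019-07-07 or later, per the note above).
-    #
-    #     changed, cleared = blob_client.get_page_range_diff_for_managed_disk(snap_url)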
-
-    def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
-        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if sequence_number_action is None:
-            raise ValueError("A sequence number action must be specified")
-        options = {
-            'sequence_number_action': sequence_number_action,
-            'timeout': kwargs.pop('timeout', None),
-            'blob_sequence_number': sequence_number,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
-        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
-        """Sets the blob sequence number.
-
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
-        :param str sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._set_sequence_number_options(
-            sequence_number_action, sequence_number=sequence_number, **kwargs)
-        try:
-            return self._client.page_blob.update_sequence_number(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
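-
-    # Illustrative sketch (not part of the original module): setting an
-    # explicit sequence number, which later writes can test with
-    # `if_sequence_number_eq` for optimistic concurrency (import path as
-    # referenced by the docstrings above).
-    #
-    #     from azure.storage.blob import SequenceNumberAction
-    #     blob_client.set_sequence_number(SequenceNumberAction.Update, '7')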
-
-    def _resize_blob_options(self, size, **kwargs):
-        # type: (int, **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if size is None:
-            raise ValueError("A content length must be specified for a Page Blob.")
-
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        options = {
-            'blob_content_length': size,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def resize_blob(self, size, **kwargs):
-        # type: (int, **Any) -> Dict[str, Union[str, datetime]]
-        """Resizes a page blob to the specified size.
-
-        If the specified value is less than the current size of the blob,
-        then all pages above the specified value are cleared.
-
-        :param int size:
-            Size used to resize the blob. The maximum size for a page blob is 1 TB.
-            The page blob size must be aligned to a 512-byte boundary.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._resize_blob_options(size, **kwargs)
-        try:
-            return self._client.page_blob.resize(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
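-
-    # Illustrative sketch (not part of the original module): growing a
-    # hypothetical page blob to 1 GiB; the new size must stay 512-byte
-    # aligned.
-    #
-    #     blob_client.resize_blob(1024 * 1024 * 1024)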
-
-    def _upload_page_options( # type: ignore
-            self, page,  # type: bytes
-            offset,  # type: int
-            length,  # type: int
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if isinstance(page, six.text_type):
-            page = page.encode(kwargs.pop('encoding', 'UTF-8'))
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer that aligns with the 512-byte page size")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer that aligns with the 512-byte page size")
-        end_range = offset + length - 1  # Reformat to an inclusive range index
-        content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        seq_conditions = SequenceNumberAccessConditions(
-            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
-            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
-            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
-        )
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        validate_content = kwargs.pop('validate_content', False)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        options = {
-            'body': page[:length],
-            'content_length': length,
-            'transactional_content_md5': None,
-            'timeout': kwargs.pop('timeout', None),
-            'range': content_range,
-            'lease_access_conditions': access_conditions,
-            'sequence_number_access_conditions': seq_conditions,
-            'modified_access_conditions': mod_conditions,
-            'validate_content': validate_content,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def upload_page( # type: ignore
-            self, page,  # type: bytes
-            offset,  # type: int
-            length,  # type: int
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """The Upload Pages operation writes a range of pages to a page blob.
-
-        :param bytes page:
-            Content of the page.
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https, as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            The encoding used if the page content is provided as text. Defaults to UTF-8.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._upload_page_options(
-            page=page,
-            offset=offset,
-            length=length,
-            **kwargs)
-        try:
-            return self._client.page_blob.upload_pages(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
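-
-    # Illustrative sketch (not part of the original module): writing a single
-    # 512-byte page at offset 0 of a hypothetical page blob client; both
-    # offset and length must be 512-byte aligned.
-    #
-    #     blob_client.upload_page(b'\x00' * 512, offset=0, length=512)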
-
-    def _upload_pages_from_url_options(  # type: ignore
-            self, source_url,  # type: str
-            offset,  # type: int
-            length,  # type: int
-            source_offset,  # type: int
-            **kwargs
-    ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        # TODO: extract the code to a method format_range
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer that aligns with the 512-byte page size")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer that aligns with the 512-byte page size")
-        if source_offset is None or source_offset % 512 != 0:
-            raise ValueError("source_offset must be an integer that aligns with the 512-byte page size")
-
-        # Format range
-        end_range = offset + length - 1
-        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
-        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)  # Inclusive end of the source range
-
-        seq_conditions = SequenceNumberAccessConditions(
-            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
-            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
-            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
-        )
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        source_mod_conditions = get_source_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        source_content_md5 = kwargs.pop('source_content_md5', None)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'source_url': source_url,
-            'content_length': 0,
-            'source_range': source_range,
-            'range': destination_range,
-            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'sequence_number_access_conditions': seq_conditions,
-            'modified_access_conditions': mod_conditions,
-            'source_modified_access_conditions': source_mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def upload_pages_from_url(self, source_url,  # type: str
-                              offset,  # type: int
-                              length,  # type: int
-                              source_offset,  # type: int
-                              **kwargs
-                              ):
-        # type: (...) -> Dict[str, Any]
-        """
-        The Upload Pages operation writes a range of pages to a page blob where
-        the contents are read from a URL.
-
-        :param str source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
-            shared access signature attached.
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int source_offset:
-            This indicates the start of the byte range (inclusive) to be read from the copy source.
-            The service will read the same number of bytes as the destination range (`length` bytes).
-        :keyword bytes source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._upload_pages_from_url_options(
-            source_url=source_url,
-            offset=offset,
-            length=length,
-            source_offset=source_offset,
-            **kwargs
-        )
-        try:
-            return self._client.page_blob.upload_pages_from_url(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
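-
-    # Illustrative sketch (not part of the original module): copying the first
-    # 4 KiB of pages from a hypothetical source URL `src_url` into the same
-    # range of this blob.
-    #
-    #     blob_client.upload_pages_from_url(src_url, offset=0, length=4096, source_offset=0)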
-
-    def _clear_page_options(self, offset, length, **kwargs):
-        # type: (int, int, **Any) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        seq_conditions = SequenceNumberAccessConditions(
-            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
-            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
-            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
-        )
-        mod_conditions = get_modify_conditions(kwargs)
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer that aligns with the 512-byte page size")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer that aligns with the 512-byte page size")
-        end_range = length + offset - 1  # Reformat to an inclusive range index
-        content_range = 'bytes={0}-{1}'.format(offset, end_range)
-
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'content_length': 0,
-            'timeout': kwargs.pop('timeout', None),
-            'range': content_range,
-            'lease_access_conditions': access_conditions,
-            'sequence_number_access_conditions': seq_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def clear_page(self, offset, length, **kwargs):
-        # type: (int, int, **Any) -> Dict[str, Union[str, datetime]]
-        """Clears a range of pages.
-
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._clear_page_options(offset, length, **kwargs)
-        try:
-            return self._client.page_blob.clear_pages(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
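-
-    # Illustrative sketch (not part of the original module): zeroing the first
-    # two 512-byte pages of a hypothetical page blob client.
-    #
-    #     blob_client.clear_page(offset=0, length=1024)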
-
-    def _append_block_options( # type: ignore
-            self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        if isinstance(data, six.text_type):
-            data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
-        if length is None:
-            length = get_length(data)
-            if length is None:
-                length, data = read_length(data)
-        if length == 0:
-            return {}
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        appendpos_condition = kwargs.pop('appendpos_condition', None)
-        maxsize_condition = kwargs.pop('maxsize_condition', None)
-        validate_content = kwargs.pop('validate_content', False)
-        append_conditions = None
-        if maxsize_condition is not None or appendpos_condition is not None:
-            append_conditions = AppendPositionAccessConditions(
-                max_size=maxsize_condition,
-                append_position=appendpos_condition
-            )
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        options = {
-            'body': data,
-            'content_length': length,
-            'timeout': kwargs.pop('timeout', None),
-            'transactional_content_md5': None,
-            'lease_access_conditions': access_conditions,
-            'append_position_access_conditions': append_conditions,
-            'modified_access_conditions': mod_conditions,
-            'validate_content': validate_content,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def append_block( # type: ignore
-            self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """Commits a new block of data to the end of the existing append blob.
-
-        :param data:
-            Content of the block. This can be bytes, text, an iterable or a file-like object.
-        :type data: bytes or str or Iterable
-        :param int length:
-            Size of the block in bytes.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            against the hash that was sent. This is primarily valuable for detecting
-            bit flips on the wire when using http instead of https, as https
-            (the default) already validates. Note that this MD5 hash is not stored
-            with the blob.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
-        :rtype: dict(str, Any)
-        """
-        options = self._append_block_options(
-            data,
-            length=length,
-            **kwargs
-        )
-        try:
-            return self._client.append_blob.append_block(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
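A small usage sketch of the conditional append described above; the connection string and blob names are placeholders, and the import path assumes this vendored module mirrors azure.storage.blob:

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient

    conn_str = "<storage-connection-string>"  # placeholder
    blob = BlobClient.from_connection_string(conn_str, "logs", "events.log")
    blob.create_append_blob()
    # Reject the write if the blob would exceed 1 MiB, or if another writer
    # has already advanced the append position past byte 0.
    headers = blob.append_block(
        b"first entry\n",
        maxsize_condition=1024 * 1024,
        appendpos_condition=0,
    )

Per the docstring, the returned dictionary carries the updated ETag, last-modified time, append offset, and committed block count.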
-    def _append_block_from_url_options(  # type: ignore
-            self, copy_source_url,  # type: str
-            source_offset=None,  # type: Optional[int]
-            source_length=None,  # type: Optional[int]
-            **kwargs
-    ):
-        # type: (...) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        # If end range is provided, start range must be provided
-        if source_length is not None and source_offset is None:
-            raise ValueError("source_offset should also be specified if source_length is specified")
-        # Format based on whether length is present
-        source_range = None
-        if source_length is not None:
-            end_range = source_offset + source_length - 1
-            source_range = 'bytes={0}-{1}'.format(source_offset, end_range)
-        elif source_offset is not None:
-            source_range = "bytes={0}-".format(source_offset)
-
-        appendpos_condition = kwargs.pop('appendpos_condition', None)
-        maxsize_condition = kwargs.pop('maxsize_condition', None)
-        source_content_md5 = kwargs.pop('source_content_md5', None)
-        append_conditions = None
-        if maxsize_condition or appendpos_condition is not None:
-            append_conditions = AppendPositionAccessConditions(
-                max_size=maxsize_condition,
-                append_position=appendpos_condition
-            )
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        source_mod_conditions = get_source_conditions(kwargs)
-        cpk_scope_info = get_cpk_scope_info(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'source_url': copy_source_url,
-            'content_length': 0,
-            'source_range': source_range,
-            'source_content_md5': source_content_md5,
-            'transactional_content_md5': None,
-            'lease_access_conditions': access_conditions,
-            'append_position_access_conditions': append_conditions,
-            'modified_access_conditions': mod_conditions,
-            'source_modified_access_conditions': source_mod_conditions,
-            'cpk_scope_info': cpk_scope_info,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers,
-            'timeout': kwargs.pop('timeout', None)}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def append_block_from_url(self, copy_source_url,  # type: str
-                              source_offset=None,  # type: Optional[int]
-                              source_length=None,  # type: Optional[int]
-                              **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """
-        Creates a new block to be committed as part of an append blob, where the contents are read from a source URL.
-
-        :param str copy_source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
-            shared access signature attached.
-        :param int source_offset:
-            The start of the range of bytes (inclusive) to be taken from the copy source.
-        :param int source_length:
-            The length of the range of bytes to be taken from the copy source.
-        :keyword bytearray source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        options = self._append_block_from_url_options(
-            copy_source_url,
-            source_offset=source_offset,
-            source_length=source_length,
-            **kwargs
-        )
-        try:
-            return self._client.append_blob.append_block_from_url(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
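A hedged sketch of copying a slice from another blob with the method above; the source URL (with SAS) and connection string are placeholders, and source_offset accompanies source_length as the options helper requires:

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient

    conn_str = "<storage-connection-string>"  # placeholder
    source_url = "https://<account>.blob.core.windows.net/src/part.log?<sas>"  # placeholder
    dest = BlobClient.from_connection_string(conn_str, "logs", "merged.log")
    dest.create_append_blob()
    # Append bytes 0-511 of the source blob to the destination.
    dest.append_block_from_url(source_url, source_offset=0, source_length=512)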
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_service_client.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_service_client.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_service_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_blob_service_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,615 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List,
-    TYPE_CHECKING
-)
-
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse # type: ignore
-
-from azure.core.paging import ItemPaged
-from azure.core.pipeline import Pipeline
-from azure.core.tracing.decorator import distributed_trace
-
-from ._shared.models import LocationMode
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.parser import _to_utc_datetime
-from ._shared.response_handlers import return_response_headers, process_storage_error, \
-    parse_to_internal_user_delegation_key
-from ._generated import AzureBlobStorage, VERSION
-from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo
-from ._container_client import ContainerClient
-from ._blob_client import BlobClient
-from ._models import ContainerPropertiesPaged
-from ._serialize import get_api_version
-from ._deserialize import service_stats_deserialize, service_properties_deserialize
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.pipeline.transport import HttpTransport
-    from azure.core.pipeline.policies import HTTPPolicy
-    from ._shared.models import UserDelegationKey
-    from ._lease import BlobLeaseClient
-    from ._models import (
-        BlobProperties,
-        ContainerProperties,
-        PublicAccess,
-        BlobAnalyticsLogging,
-        Metrics,
-        CorsRule,
-        RetentionPolicy,
-        StaticWebsite,
-    )
-
-
-class BlobServiceClient(StorageAccountHostsMixin):
-    """A client to interact with the Blob Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete containers within the account.
-    For operations relating to a specific container or blob, clients for those entities
-    can also be retrieved using the `get_client` functions.
-
-    :param str account_url:
-        The URL to the blob storage account. Any other entities included
-        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
-        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_authentication.py
-            :start-after: [START create_blob_service_client]
-            :end-before: [END create_blob_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/blob_samples_authentication.py
-            :start-after: [START create_blob_service_client_oauth]
-            :end-before: [END create_blob_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        _, sas_token = parse_query(parsed_url.query)
-        self._query_str, credential = self._format_query_string(sas_token, credential)
-        super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
-        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> BlobServiceClient
-        """Create BlobServiceClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :returns: A Blob service client.
-        :rtype: ~azure.storage.blob.BlobServiceClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_authentication.py
-                :start-after: [START auth_from_connection_string]
-                :end-before: [END auth_from_connection_string]
-                :language: python
-                :dedent: 8
-                :caption: Creating the BlobServiceClient from a connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(account_url, credential=credential, **kwargs)
-
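As a sketch, assuming the vendored package exposes BlobServiceClient at this path, the connection-string constructor looks like the following; all account values are placeholders:

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=<name>;"
        "AccountKey=<key>;EndpointSuffix=core.windows.net"
    )  # placeholder values
    service = BlobServiceClient.from_connection_string(conn_str)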
-    @distributed_trace
-    def get_user_delegation_key(self, key_start_time,  # type: datetime
-                                key_expiry_time,  # type: datetime
-                                **kwargs  # type: Any
-                                ):
-        # type: (...) -> UserDelegationKey
-        """
-        Obtain a user delegation key for the purpose of signing SAS tokens.
-        A token credential must be present on the service object for this request to succeed.
-
-        :param ~datetime.datetime key_start_time:
-            A DateTime value. Indicates when the key becomes valid.
-        :param ~datetime.datetime key_expiry_time:
-            A DateTime value. Indicates when the key stops being valid.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The user delegation key.
-        :rtype: ~azure.storage.blob.UserDelegationKey
-        """
-        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info,
-                                                                               timeout=timeout,
-                                                                               **kwargs)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
-
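A sketch of obtaining a delegation key with a token credential, assuming azure-identity is installed; the account URL and validity window are illustrative:

    from datetime import datetime, timedelta

    from azure.identity import DefaultAzureCredential
    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    service = BlobServiceClient(
        "https://<account>.blob.core.windows.net",  # placeholder account URL
        credential=DefaultAzureCredential(),        # a token credential is required here
    )
    key = service.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=1),
    )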
-    @distributed_trace
-    def get_account_information(self, **kwargs):
-        # type: (Any) -> Dict[str, str]
-        """Gets information related to the storage account.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict(str, str)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START get_blob_service_account_info]
-                :end-before: [END get_blob_service_account_info]
-                :language: python
-                :dedent: 8
-                :caption: Getting account information for the blob service.
-        """
-        try:
-            return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_service_stats(self, **kwargs):
-        # type: (**Any) -> Dict[str, Any]
-        """Retrieves statistics related to replication for the Blob service.
-
-        It is only available when read-access geo-redundant replication is enabled for
-        the storage account.
-
-        With geo-redundant replication, Azure Storage keeps your data durable
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create an account via the Azure portal, for example, North Central US.
-        The location to which your data is replicated
-        is the secondary location. The secondary location is automatically
-        determined based on the location of the primary; it is in a second data
-        center that resides in the same region as the primary location. Read-only
-        access is available from the secondary location if read-access geo-redundant
-        replication is enabled for your storage account.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service stats.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START get_blob_service_stats]
-                :end-before: [END get_blob_service_stats]
-                :language: python
-                :dedent: 8
-                :caption: Getting service stats for the blob service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = self._client.service.get_statistics( # type: ignore
-                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
-            return service_stats_deserialize(stats)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_service_properties(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An object containing blob service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START get_blob_service_properties]
-                :end-before: [END get_blob_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting service properties for the blob service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def set_service_properties(
-            self, analytics_logging=None,  # type: Optional[BlobAnalyticsLogging]
-            hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            target_version=None,  # type: Optional[str]
-            delete_retention_policy=None,  # type: Optional[RetentionPolicy]
-            static_website=None,  # type: Optional[StaticWebsite]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        If an element (e.g. analytics_logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param analytics_logging:
-            Groups the Azure Analytics Logging settings.
-        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for blobs.
-        :type hour_metrics: ~azure.storage.blob.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for blobs.
-        :type minute_metrics: ~azure.storage.blob.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list[~azure.storage.blob.CorsRule]
-        :param str target_version:
-            Indicates the default version to use for requests if an incoming
-            request's version is not specified.
-        :param delete_retention_policy:
-            The delete retention policy specifies whether to retain deleted blobs.
-            It also specifies the number of days and versions of blob to keep.
-        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
-        :param static_website:
-            Specifies whether the static website feature is enabled,
-            and if yes, indicates the index document and 404 error document to use.
-        :type static_website: ~azure.storage.blob.StaticWebsite
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START set_blob_service_properties]
-                :end-before: [END set_blob_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Setting service properties for the blob service.
-        """
-        props = StorageServiceProperties(
-            logging=analytics_logging,
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors,
-            default_service_version=target_version,
-            delete_retention_policy=delete_retention_policy,
-            static_website=static_website
-        )
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.service.set_properties(props, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
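An illustrative partial update using the method above; only CORS and delete retention are set, so the omitted (None) settings are preserved. Names and origins are placeholders:

    from azure.multiapi.storagev2.blob.v2019_07_07 import (
        BlobServiceClient,
        CorsRule,
        RetentionPolicy,
    )

    conn_str = "<storage-connection-string>"  # placeholder
    service = BlobServiceClient.from_connection_string(conn_str)
    # Analytics and metrics stay as-is because they are left as None.
    service.set_service_properties(
        cors=[CorsRule(["https://www.contoso.com"], ["GET", "PUT"])],
        delete_retention_policy=RetentionPolicy(enabled=True, days=7),
    )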
-    @distributed_trace
-    def list_containers(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> ItemPaged[ContainerProperties]
-        """Returns a generator to list the containers under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-            The default value is `False`.
-        :keyword int results_per_page:
-            The maximum number of container names to retrieve per API
-            call. If the request does not specify a value, the server will return up to 5,000 items.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) of ContainerProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START bsc_list_containers]
-                :end-before: [END bsc_list_containers]
-                :language: python
-                :dedent: 12
-                :caption: Listing the containers in the blob service.
-        """
-        include = 'metadata' if include_metadata else None
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.service.list_containers_segment,
-            prefix=name_starts_with,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-                command,
-                prefix=name_starts_with,
-                results_per_page=results_per_page,
-                page_iterator_class=ContainerPropertiesPaged
-            )
-
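A brief sketch of consuming the pager returned above; the pager follows continuation tokens transparently, and results_per_page only caps how many names each underlying call fetches. Values are placeholders:

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    conn_str = "<storage-connection-string>"  # placeholder
    service = BlobServiceClient.from_connection_string(conn_str)
    for container in service.list_containers(name_starts_with="app-",
                                             include_metadata=True,
                                             results_per_page=100):
        print(container.name, container.metadata)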
-    @distributed_trace
-    def create_container(
-            self, name,  # type: str
-            metadata=None,  # type: Optional[Dict[str, str]]
-            public_access=None,  # type: Optional[Union[PublicAccess, str]]
-            **kwargs
-        ):
-        # type: (...) -> ContainerClient
-        """Creates a new container under the specified account.
-
-        If a container with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a client with which to interact with the newly
-        created container.
-
-        :param str name: The name of the container to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: 'container', 'blob'.
-        :type public_access: str or ~azure.storage.blob.PublicAccess
-        :keyword container_encryption_scope:
-            Specifies the default encryption scope to set on the container and use for
-            all future writes.
-
-            .. versionadded:: 12.2.0
-
-        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.blob.ContainerClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START bsc_create_container]
-                :end-before: [END bsc_create_container]
-                :language: python
-                :dedent: 12
-                :caption: Creating a container in the blob service.
-        """
-        container = self.get_container_client(name)
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        container.create_container(
-            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
-        return container
-
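A sketch of the create-or-reuse pattern implied by the ResourceExistsError behavior above; names are placeholders:

    from azure.core.exceptions import ResourceExistsError
    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    conn_str = "<storage-connection-string>"  # placeholder
    service = BlobServiceClient.from_connection_string(conn_str)
    try:
        container = service.create_container("reports", metadata={"Category": "test"})
    except ResourceExistsError:
        # An existing container raises rather than no-ops, so fall back to a client.
        container = service.get_container_client("reports")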
-    @distributed_trace
-    def delete_container(
-            self, container,  # type: Union[ContainerProperties, str]
-            lease=None,  # type: Optional[Union[BlobLeaseClient, str]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified container for deletion.
-
-        The container and any blobs contained within it are later deleted during garbage collection.
-        If the container is not found, a ResourceNotFoundError will be raised.
-
-        :param container:
-            The container to delete. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :param lease:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START bsc_delete_container]
-                :end-before: [END bsc_delete_container]
-                :language: python
-                :dedent: 12
-                :caption: Deleting a container in the blob service.
-        """
-        container = self.get_container_client(container) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        container.delete_container( # type: ignore
-            lease=lease,
-            timeout=timeout,
-            **kwargs)
-
-    def get_container_client(self, container):
-        # type: (Union[ContainerProperties, str]) -> ContainerClient
-        """Get a client to interact with the specified container.
-
-        The container need not already exist.
-
-        :param container:
-            The container. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :returns: A ContainerClient.
-        :rtype: ~azure.storage.blob.ContainerClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START bsc_get_container_client]
-                :end-before: [END bsc_get_container_client]
-                :language: python
-                :dedent: 8
-                :caption: Getting the container client to interact with a specific container.
-        """
-        try:
-            container_name = container.name
-        except AttributeError:
-            container_name = container
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ContainerClient(
-            self.url, container_name=container_name,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-
-    def get_blob_client(
-            self, container,  # type: Union[ContainerProperties, str]
-            blob,  # type: Union[BlobProperties, str]
-            snapshot=None  # type: Optional[Union[Dict[str, Any], str]]
-        ):
-        # type: (...) -> BlobClient
-        """Get a client to interact with the specified blob.
-
-        The blob need not already exist.
-
-        :param container:
-            The container that the blob is in. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :param blob:
-            The blob with which to interact. This can either be the name of the blob,
-            or an instance of BlobProperties.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param snapshot:
-            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
-            or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`.
-        :type snapshot: str or dict(str, Any)
-        :returns: A BlobClient.
-        :rtype: ~azure.storage.blob.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service.py
-                :start-after: [START bsc_get_blob_client]
-                :end-before: [END bsc_get_blob_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the blob client to interact with a specific blob.
-        """
-        try:
-            container_name = container.name
-        except AttributeError:
-            container_name = container
-        try:
-            blob_name = blob.name
-        except AttributeError:
-            blob_name = blob
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return BlobClient( # type: ignore
-            self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
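A short sketch tying the client getters above together; container and blob names are placeholders, and since the client is constructed locally from the shared pipeline, no network call happens until the upload:

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    conn_str = "<storage-connection-string>"  # placeholder
    service = BlobServiceClient.from_connection_string(conn_str)
    # Neither the container nor the blob needs to exist yet.
    blob = service.get_blob_client("reports", "2020/01/summary.csv")
    blob.upload_blob(b"date,total\n")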
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_container_client.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_container_client.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_container_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_container_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1312 +0,0 @@
-# pylint: disable=too-many-lines
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator,
-    TYPE_CHECKING
-)
-
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-from azure.core.paging import ItemPaged
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import Pipeline
-from azure.core.pipeline.transport import HttpRequest
-
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.request_handlers import add_metadata_headers, serialize_iso
-from ._shared.response_handlers import (
-    process_storage_error,
-    return_response_headers,
-    return_headers_and_deserialized)
-from ._generated import AzureBlobStorage, VERSION
-from ._generated.models import (
-    StorageErrorException,
-    SignedIdentifier)
-from ._deserialize import deserialize_container_properties
-from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version
-from ._models import ( # pylint: disable=unused-import
-    ContainerProperties,
-    BlobProperties,
-    BlobPropertiesPaged,
-    BlobType,
-    BlobPrefix)
-from ._lease import BlobLeaseClient, get_access_conditions
-from ._blob_client import BlobClient
-
-if TYPE_CHECKING:
-    from azure.core.pipeline.transport import HttpTransport, HttpResponse  # pylint: disable=ungrouped-imports
-    from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports
-    from datetime import datetime
-    from ._models import (  # pylint: disable=unused-import
-        PublicAccess,
-        AccessPolicy,
-        ContentSettings,
-        StandardBlobTier,
-        PremiumPageBlobTier)
-
-
-def _get_blob_name(blob):
-    """Return the blob name.
-
-    :param blob: A blob string or BlobProperties
-    :rtype: str
-    """
-    try:
-        return blob.name
-    except AttributeError:
-        return blob
-
-
-class ContainerClient(StorageAccountHostsMixin):
-    """A client to interact with a specific container, although that container
-    may not yet exist.
-
-    For operations relating to a specific blob within this container, a blob client can be
-    retrieved using the :func:`~get_blob_client` function.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the container,
-        use the :func:`from_container_url` classmethod.
-    :param container_name:
-        The name of the container for the blob.
-    :type container_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
-        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_containers.py
-            :start-after: [START create_container_client_from_service]
-            :end-before: [END create_container_client_from_service]
-            :language: python
-            :dedent: 8
-            :caption: Get a ContainerClient from an existing BlobServiceClient.
-
-        .. literalinclude:: ../samples/blob_samples_containers.py
-            :start-after: [START create_container_client_sasurl]
-            :end-before: [END create_container_client_sasurl]
-            :language: python
-            :dedent: 8
-            :caption: Creating the container client directly.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            container_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Container URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not container_name:
-            raise ValueError("Please specify a container name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        _, sas_token = parse_query(parsed_url.query)
-        self.container_name = container_name
-        self._query_str, credential = self._format_query_string(sas_token, credential)
-        super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
-        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        container_name = self.container_name
-        if isinstance(container_name, six.text_type):
-            container_name = container_name.encode('UTF-8')
-        return "{}://{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(container_name),
-            self._query_str)
-
-    @classmethod
-    def from_container_url(cls, container_url, credential=None, **kwargs):
-        # type: (str, Optional[Any], Any) -> ContainerClient
-        """Create ContainerClient from a container url.
-
-        :param str container_url:
-            The full endpoint URL to the Container, including SAS token if used. This could be
-            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
-        :type container_url: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :returns: A container client.
-        :rtype: ~azure.storage.blob.ContainerClient
-        """
-        try:
-            if not container_url.lower().startswith('http'):
-                container_url = "https://" + container_url
-        except AttributeError:
-            raise ValueError("Container URL must be a string.")
-        parsed_url = urlparse(container_url.rstrip('/'))
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(container_url))
-
-        container_path = parsed_url.path.lstrip('/').split('/')
-        account_path = ""
-        if len(container_path) > 1:
-            account_path = "/" + "/".join(container_path[:-1])
-        account_url = "{}://{}{}?{}".format(
-            parsed_url.scheme,
-            parsed_url.netloc.rstrip('/'),
-            account_path,
-            parsed_url.query)
-        container_name = unquote(container_path[-1])
-        if not container_name:
-            raise ValueError("Invalid URL. Please provide a URL with a valid container name")
-        return cls(account_url, container_name=container_name, credential=credential, **kwargs)
-
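A sketch of the URL-based constructor above; the account, container, and SAS token are placeholders:

    from azure.multiapi.storagev2.blob.v2019_07_07 import ContainerClient

    container = ContainerClient.from_container_url(
        "https://<account>.blob.core.windows.net/mycontainer?<sas-token>"  # placeholder
    )
    props = container.get_container_properties()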
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            container_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> ContainerClient
-        """Create ContainerClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param container_name:
-            The container name for the blob.
-        :type container_name: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :returns: A container client.
-        :rtype: ~azure.storage.blob.ContainerClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_authentication.py
-                :start-after: [START auth_from_connection_string_container]
-                :end-before: [END auth_from_connection_string_container]
-                :language: python
-                :dedent: 8
-                :caption: Creating the ContainerClient from a connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, container_name=container_name, credential=credential, **kwargs)
-
-    @distributed_trace
-    def create_container(self, metadata=None, public_access=None, **kwargs):
-        # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
-        """
-        Creates a new container under the specified account. If a container
-        with the same name already exists, the operation fails.
-
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict[str, str]
-        :param ~azure.storage.blob.PublicAccess public_access:
-            Possible values include: 'container', 'blob'.
-        :keyword container_encryption_scope:
-            Specifies the default encryption scope to set on the container and use for
-            all future writes.
-
-            .. versionadded:: 12.2.0
-
-        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START create_container]
-                :end-before: [END create_container]
-                :language: python
-                :dedent: 12
-                :caption: Creating a container to store blobs.
-        """
-        headers = kwargs.pop('headers', {})
-        timeout = kwargs.pop('timeout', None)
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
-        try:
-            return self._client.container.create( # type: ignore
-                timeout=timeout,
-                access=public_access,
-                container_cpk_scope_info=container_cpk_scope_info,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
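A sketch of the failure mode described above, reusing `container_client` from the earlier snippet. It assumes the name collision surfaces as azure.core's ResourceExistsError, the exception the track2 error handler maps HTTP 409 to:

    from azure.core.exceptions import ResourceExistsError

    try:
        container_client.create_container(
            metadata={'Category': 'test'}, public_access='blob')
    except ResourceExistsError:
        pass  # a container with this name already exists
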
-    @distributed_trace
-    def delete_container(
-            self, **kwargs):
-        # type: (Any) -> None
-        """
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :keyword lease:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START delete_container]
-                :end-before: [END delete_container]
-                :language: python
-                :dedent: 12
-                :caption: Delete a container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        mod_conditions = get_modify_conditions(kwargs)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.container.delete(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def acquire_lease(
-            self, lease_duration=-1,  # type: int
-            lease_id=None,  # type: Optional[str]
-            **kwargs):
-        # type: (...) -> BlobLeaseClient
-        """
-        Requests a new lease. If the container does not have an active lease,
-        the Blob service creates a lease on the container and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A BlobLeaseClient object that can be used in a context manager.
-        :rtype: ~azure.storage.blob.BlobLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START acquire_lease_on_container]
-                :end-before: [END acquire_lease_on_container]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on the container.
-        """
-        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
-        return lease
-
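A short sketch of the lease flow: acquire a finite lease, then pass it to an operation that requires it (again reusing the hypothetical `container_client`):

    # 15 seconds is the shortest finite lease; -1 (the default) never expires.
    lease = container_client.acquire_lease(lease_duration=15)
    # Deletion now succeeds only when the matching lease is supplied.
    container_client.delete_container(lease=lease)
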
-    @distributed_trace
-    def get_account_information(self, **kwargs):
-        # type: (**Any) -> Dict[str, str]
-        """Gets information related to the storage account.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict[str, str]
-        """
-        try:
-            return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_container_properties(self, **kwargs):
-        # type: (Any) -> ContainerProperties
-        """Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :keyword lease:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Properties for the specified container within a container object.
-        :rtype: ~azure.storage.blob.ContainerProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START get_container_properties]
-                :end-before: [END get_container_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting properties on the container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = self._client.container.get_properties(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                cls=deserialize_container_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        response.name = self.container_name
-        return response # type: ignore
-
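For illustration, a sketch reading a few ContainerProperties fields (attribute names as modeled by this library):

    props = container_client.get_container_properties()
    print(props.name, props.last_modified, props.metadata)
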
-    @distributed_trace
-    def set_container_metadata( # type: ignore
-            self, metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as
-            metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword lease:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Container-updated property dict (Etag and last modified).
-        :rtype: dict[str, str or datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START set_container_metadata]
-                :end-before: [END set_container_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Setting metadata on the container.
-        """
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        mod_conditions = get_modify_conditions(kwargs)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return self._client.container.set_metadata( # type: ignore
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
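A sketch of the replace-not-merge semantics described above:

    container_client.set_container_metadata({'category': 'test'})
    # Each call replaces the full metadata set, so this clears everything:
    container_client.set_container_metadata()
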
-    @distributed_trace
-    def get_container_access_policy(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :keyword lease:
-            If specified, get_container_access_policy only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Access policy information in a dict.
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START get_container_access_policy]
-                :end-before: [END get_container_access_policy]
-                :language: python
-                :dedent: 12
-                :caption: Getting the access policy on the container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response, identifiers = self._client.container.get_access_policy(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                cls=return_headers_and_deserialized,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {
-            'public_access': response.get('blob_public_access'),
-            'signed_identifiers': identifiers or []
-        }
-
-    @distributed_trace
-    def set_container_access_policy(
-            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
-            public_access=None,  # type: Optional[Union[str, PublicAccess]]
-            **kwargs
-        ):  # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets the permissions for the specified container or stored access
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The
-            dictionary may contain up to 5 elements. An empty dictionary
-            will clear the access policies set on the service.
-        :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
-        :param ~azure.storage.blob.PublicAccess public_access:
-            Possible values include: 'container', 'blob'.
-        :keyword lease:
-            Required if the container has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified date/time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Container-updated property dict (Etag and last modified).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START set_container_access_policy]
-                :end-before: [END set_container_access_policy]
-                :language: python
-                :dedent: 12
-                :caption: Setting access policy on the container.
-        """
-        if len(signed_identifiers) > 5:
-            raise ValueError(
-                'Too many access policies provided. The server does not support setting '
-                'more than 5 access policies on a single resource.')
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
-        signed_identifiers = identifiers # type: ignore
-        lease = kwargs.pop('lease', None)
-        mod_conditions = get_modify_conditions(kwargs)
-        access_conditions = get_access_conditions(lease)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return self._client.container.set_access_policy(
-                container_acl=signed_identifiers or None,
-                timeout=timeout,
-                access=public_access,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
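A sketch of setting a single stored access policy. It assumes the AccessPolicy and ContainerSasPermissions models re-exported by the package's public namespace (imported here from upstream azure.storage.blob so the snippet runs standalone):

    from datetime import datetime, timedelta
    from azure.storage.blob import AccessPolicy, ContainerSasPermissions

    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # At most 5 identifiers; an empty dict clears any existing policies.
    container_client.set_container_access_policy(
        signed_identifiers={'read-only': policy}, public_access='blob')
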
-    @distributed_trace
-    def list_blobs(self, name_starts_with=None, include=None, **kwargs):
-        # type: (Optional[str], Optional[Any], **Any) -> ItemPaged[BlobProperties]
-        """Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service.
-
-        :param str name_starts_with:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param list[str] include:
-            Specifies one or more additional datasets to include in the response.
-            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of BlobProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START list_blobs_in_container]
-                :end-before: [END list_blobs_in_container]
-                :language: python
-                :dedent: 8
-                :caption: List the blobs in the container.
-        """
-        if include and not isinstance(include, list):
-            include = [include]
-
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        command = functools.partial(
-            self._client.container.list_blob_flat_segment,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=BlobPropertiesPaged)
-
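Typical consumption of the pager, with hypothetical prefix and include values; the pager follows continuation tokens transparently:

    for blob in container_client.list_blobs(name_starts_with='logs/',
                                            include=['metadata']):
        print(blob.name, blob.size, blob.metadata)
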
-    @distributed_trace
-    def walk_blobs(
-            self, name_starts_with=None, # type: Optional[str]
-            include=None, # type: Optional[Any]
-            delimiter="/", # type: str
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> ItemPaged[BlobProperties]
-        """Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service. This operation will list blobs in accordance with a hierarchy,
-        as delimited by the specified delimiter character.
-
-        :param str name_starts_with:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param list[str] include:
-            Specifies one or more additional datasets to include in the response.
-            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
-        :param str delimiter:
-            When the request includes this parameter, the operation returns a BlobPrefix
-            element in the response body that acts as a placeholder for all blobs whose
-            names begin with the same substring up to the appearance of the delimiter
-            character. The delimiter may be a single character or a string.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of BlobProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
-        """
-        if include and not isinstance(include, list):
-            include = [include]
-
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        command = functools.partial(
-            self._client.container.list_blob_hierarchy_segment,
-            delimiter=delimiter,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return BlobPrefix(
-            command,
-            prefix=name_starts_with,
-            results_per_page=results_per_page,
-            delimiter=delimiter)
-
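A sketch of walking the virtual hierarchy. It relies on the convention that BlobPrefix entries carry names ending in the delimiter, which is how they can be told apart from ordinary blobs:

    def walk(prefix=None, depth=0):
        for item in container_client.walk_blobs(name_starts_with=prefix,
                                                delimiter='/'):
            print('  ' * depth + item.name)
            if item.name.endswith('/'):   # a BlobPrefix, i.e. a "directory"
                walk(prefix=item.name, depth=depth + 1)

    walk()
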
-    @distributed_trace
-    def upload_blob(
-            self, name,  # type: Union[str, BlobProperties]
-            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
-            length=None,  # type: Optional[int]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> BlobClient
-        """Creates a new blob from a data source with automatic chunking.
-
-        :param name: The blob with which to interact.
-        :type name: str or ~azure.storage.blob.BlobProperties
-        :param data: The blob data to upload.
-        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
-            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
-            If True, upload_blob will overwrite the existing data. If set to False, the
-            operation will fail with ResourceExistsError. The exception to the above is with Append
-            blob types: if set to False and the data already exists, an error will not be raised
-            and the data will be appended to the existing blob. If overwrite=True is set, the existing
-            append blob will be deleted and a new one created. Defaults to False.
-        :keyword ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the container has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: A BlobClient to interact with the newly uploaded blob.
-        :rtype: ~azure.storage.blob.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START upload_blob_to_container]
-                :end-before: [END upload_blob_to_container]
-                :language: python
-                :dedent: 8
-                :caption: Upload blob to the container.
-        """
-        blob = self.get_blob_client(name)
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        blob.upload_blob(
-            data,
-            blob_type=blob_type,
-            length=length,
-            metadata=metadata,
-            timeout=timeout,
-            encoding=encoding,
-            **kwargs
-        )
-        return blob
-
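A sketch of the common upload path, using a hypothetical local file name; the returned BlobClient points at the newly written blob:

    with open('report.csv', 'rb') as data:
        blob_client = container_client.upload_blob(
            name='report.csv', data=data, overwrite=True)
    print(blob_client.url)
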
-    @distributed_trace
-    def delete_blob(
-            self, blob,  # type: Union[str, BlobProperties]
-            delete_snapshots=None,  # type: Optional[str]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified blob or snapshot for deletion.
-
-        The blob is later deleted during garbage collection.
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the delete_blob
-        operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
-        and retains the blob or snapshot for the specified number of days.
-        After the specified number of days, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` by specifying the
-        `include=["deleted"]` option, and can be restored using :func:`~BlobClient.undelete()`.
-
-        :param blob: The blob with which to interact.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param str delete_snapshots:
-            Required if the blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        blob_client = self.get_blob_client(blob) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        blob_client.delete_blob( # type: ignore
-            delete_snapshots=delete_snapshots,
-            timeout=timeout,
-            **kwargs)
-
-    @distributed_trace
-    def download_blob(self, blob, offset=None, length=None, **kwargs):
-        # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader
-        """Downloads a blob to the StorageStreamDownloader. The readall() method must
-        be used to read all the content or readinto() must be used to download the blob into
-        a stream.
-
-        :param blob: The blob with which to interact.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param int offset:
-            Start of byte range to use for downloading a section of the blob.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient download algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, download_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword str encoding:
-            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: A streaming object (StorageStreamDownloader)
-        :rtype: ~azure.storage.blob.StorageStreamDownloader
-        """
-        blob_client = self.get_blob_client(blob) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        return blob_client.download_blob(offset=offset, length=length, **kwargs)
-
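The two consumption patterns named in the docstring, sketched with a hypothetical blob name:

    downloader = container_client.download_blob('report.csv')
    content = downloader.readall()            # entire payload as bytes

    with open('copy.csv', 'wb') as stream:    # or stream straight to a file
        container_client.download_blob('report.csv').readinto(stream)
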
-    def _generate_delete_blobs_options(
-        self, snapshot=None,
-        delete_snapshots=None,
-        request_id=None,
-        lease_access_conditions=None,
-        modified_access_conditions=None,
-        **kwargs
-    ):
-        """This code is a copy from _generated.
-
-        Once Autorest is able to provide request preparation, this code should be removed.
-        """
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct parameters
-        timeout = kwargs.pop('timeout', None)
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str')  # pylint: disable=protected-access
-        if timeout is not None:
-            query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0)  # pylint: disable=protected-access
-
-        # Construct headers
-        header_parameters = {}
-        if delete_snapshots is not None:
-            header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "if_none_match", if_none_match, 'str')
-
-        return query_parameters, header_parameters
-
-    @distributed_trace
-    def delete_blobs(self, *blobs, **kwargs):
-        # type: (...) -> Iterator[HttpResponse]
-        """Marks the specified blobs or snapshots for deletion.
-
-        The blobs are later deleted during garbage collection.
-        Note that in order to delete blobs, you must delete all of their
-        snapshots. You can delete both at the same time with the delete_blobs operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
-        and retains the blobs or snapshots for the specified number of days.
-        After the specified number of days, the blobs' data is removed from the service during garbage collection.
-        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying the
-        `include=["deleted"]` option, and can be restored using :func:`~BlobClient.undelete()`.
-
-        :param blobs: The blobs to delete. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :keyword str delete_snapshots:
-            Required if a blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :keyword lease:
-            Required if a blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword bool raise_on_any_failure:
-            A boolean value that defaults to True. When set, an exception
-            is raised if any single operation fails.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: An iterator of responses, one for each blob in order
-        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START delete_multiple_blobs]
-                :end-before: [END delete_multiple_blobs]
-                :language: python
-                :dedent: 8
-                :caption: Deleting multiple blobs.
-        """
-        raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
-        options = BlobClient._generic_delete_blob_options(  # pylint: disable=protected-access
-            **kwargs
-        )
-        options.update({'raise_on_any_failure': raise_on_any_failure})
-        query_parameters, header_parameters = self._generate_delete_blobs_options(**options)
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature; otherwise the transport will be upset.
-        for possible_param in ['timeout', 'delete_snapshots', 'lease_access_conditions', 'modified_access_conditions']:
-            options.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "DELETE",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return self._batch_send(*reqs, **options)
-
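A sketch of the batch call with hypothetical blob names; with raise_on_any_failure disabled, per-blob outcomes surface as HTTP responses instead of an exception:

    responses = container_client.delete_blobs(
        'a.txt', 'b.txt', raise_on_any_failure=False)
    for response in responses:
        print(response.status_code)   # e.g. 202 on success, 404 if missing
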
-    def _generate_set_tier_options(
-        self, tier, rehydrate_priority=None, request_id=None, lease_access_conditions=None, **kwargs
-    ):
-        """This code is a copy from _generated.
-
-        Once Autorest is able to provide request preparation, this code should be removed.
-        """
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "tier"
-        timeout = kwargs.pop('timeout', None)
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0)  # pylint: disable=protected-access
-        query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str')  # pylint: disable=protected-access, specify-parameter-names-in-call
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str')  # pylint: disable=protected-access, specify-parameter-names-in-call
-        if rehydrate_priority is not None:
-            header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "rehydrate_priority", rehydrate_priority, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._client._serialize.header(  # pylint: disable=protected-access
-                "request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str')  # pylint: disable=protected-access
-
-        return query_parameters, header_parameters
-
-    @distributed_trace
-    def set_standard_blob_tier_blobs(
-        self,
-        standard_blob_tier,  # type: Union[str, StandardBlobTier]
-        *blobs,  # type: Union[str, BlobProperties]
-        **kwargs
-    ):
-        # type: (...) -> Iterator[HttpResponse]
-        """This operation sets the tier on block blobs.
-
-        A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param standard_blob_tier:
-            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
-            'Archive'. The hot tier is optimized for storing data that is accessed
-            frequently. The cool storage tier is optimized for storing data that
-            is infrequently accessed and stored for at least a month. The archive
-            tier is optimized for storing data that is rarely accessed and stored
-            for at least six months with flexible latency requirements.
-        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
-        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword bool raise_on_any_failure:
-            A boolean value that defaults to True. When set, an exception
-            is raised if any single operation fails.
-        :return: An iterator of responses, one for each blob in order
-        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if standard_blob_tier is None:
-            raise ValueError("A StandardBlobTier must be specified")
-
-        query_parameters, header_parameters = self._generate_set_tier_options(
-            tier=standard_blob_tier,
-            lease_access_conditions=access_conditions,
-            **kwargs
-        )
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature; otherwise the transport will be upset.
-        for possible_param in ['timeout', 'lease']:
-            kwargs.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "PUT",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return self._batch_send(*reqs, **kwargs)
-
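A sketch of a bulk tier change on hypothetical blobs, using the StandardBlobTier enum this package exposes:

    from azure.storage.blob import StandardBlobTier

    container_client.set_standard_blob_tier_blobs(
        StandardBlobTier.Archive, 'a.txt', 'b.txt')
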
-    @distributed_trace
-    def set_premium_page_blob_tier_blobs(
-        self,
-        premium_page_blob_tier,  # type: Union[str, PremiumPageBlobTier]
-        *blobs,  # type: Union[str, BlobProperties]
-        **kwargs
-    ):
-        # type: (...) -> Iterator[HttpResponse]
-        """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
-
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
-        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword bool raise_on_any_failure:
-            A boolean value that defaults to True. When set, an exception
-            is raised if any single operation fails.
-        :return: An iterator of responses, one for each blob in order
-        :rtype: iterator[~azure.core.pipeline.transport.HttpResponse]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if premium_page_blob_tier is None:
-            raise ValueError("A PremiumPageBlobTier must be specified")
-
-        query_parameters, header_parameters = self._generate_set_tier_options(
-            tier=premium_page_blob_tier,
-            lease_access_conditions=access_conditions,
-            **kwargs
-        )
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature; otherwise the transport will be upset.
-        for possible_param in ['timeout', 'lease']:
-            kwargs.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "PUT",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return self._batch_send(*reqs, **kwargs)
-
-    def get_blob_client(
-            self, blob,  # type: Union[str, BlobProperties]
-            snapshot=None  # type: str
-        ):
-        # type: (...) -> BlobClient
-        """Get a client to interact with the specified blob.
-
-        The blob need not already exist.
-
-        :param blob:
-            The blob with which to interact.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param str snapshot:
-            The optional blob snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`~BlobClient.create_snapshot()`.
-        :returns: A BlobClient.
-        :rtype: ~azure.storage.blob.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers.py
-                :start-after: [START get_blob_client]
-                :end-before: [END get_blob_client]
-                :language: python
-                :dedent: 8
-                :caption: Get the blob client.
-        """
-        blob_name = _get_blob_name(blob)
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return BlobClient(
-            self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
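Finally, a sketch of obtaining a blob-scoped client that shares the container client's transport pipeline:

    blob_client = container_client.get_blob_client('report.csv')
    print(blob_client.url)   # account URL + container name + blob name
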
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_deserialize.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_deserialize.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_deserialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_deserialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,83 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from typing import (  # pylint: disable=unused-import
-    Tuple, Dict, List,
-    TYPE_CHECKING
-)
-
-from ._shared.response_handlers import deserialize_metadata
-from ._models import BlobProperties, ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
-    StaticWebsite
-
-if TYPE_CHECKING:
-    from ._generated.models import PageList
-
-
-def deserialize_blob_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    blob_properties = BlobProperties(
-        metadata=metadata,
-        **headers
-    )
-    if 'Content-Range' in headers:
-        if 'x-ms-blob-content-md5' in headers:
-            blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
-        else:
-            blob_properties.content_settings.content_md5 = None
-    return blob_properties
-
-
-def deserialize_blob_stream(response, obj, headers):
-    blob_properties = deserialize_blob_properties(response, obj, headers)
-    obj.properties = blob_properties
-    return response.location_mode, obj
-
-
-def deserialize_container_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    container_properties = ContainerProperties(
-        metadata=metadata,
-        **headers
-    )
-    return container_properties
-
-
-def get_page_ranges_result(ranges):
-    # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-    page_range = [] # type: ignore
-    clear_range = [] # type: List
-    if ranges.page_range:
-        page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
-    if ranges.clear_range:
-        clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
-    return page_range, clear_range  # type: ignore
-
-
-def service_stats_deserialize(generated):
-    """Deserialize a ServiceStats objects into a dict.
-    """
-    return {
-        'geo_replication': {
-            'status': generated.geo_replication.status,
-            'last_sync_time': generated.geo_replication.last_sync_time,
-        }
-    }
-
-
-def service_properties_deserialize(generated):
-    """Deserialize a ServiceProperties objects into a dict.
-    """
-    return {
-        'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging),  # pylint: disable=protected-access
-        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
-        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
-        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
-        'target_version': generated.default_service_version,  # pylint: disable=protected-access
-        'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy),  # pylint: disable=protected-access
-        'static_website': StaticWebsite._from_generated(generated.static_website),  # pylint: disable=protected-access
-    }
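To make the output shape of get_page_ranges_result above concrete, a small sketch with stand-in models; _Range and _PageList are assumptions standing in for the generated PageRange/ClearRange/PageList types:

from collections import namedtuple

# Stand-ins for the generated models; only .start/.end and the two
# list attributes matter to get_page_ranges_result.
_Range = namedtuple("_Range", ["start", "end"])
_PageList = namedtuple("_PageList", ["page_range", "clear_range"])

ranges = _PageList(
    page_range=[_Range(0, 511), _Range(1024, 1535)],
    clear_range=[_Range(512, 1023)],
)
page_range, clear_range = get_page_ranges_result(ranges)
assert page_range == [{'start': 0, 'end': 511}, {'start': 1024, 'end': 1535}]
assert clear_range == [{'start': 512, 'end': 1023}]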
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_download.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_download.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_download.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_download.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,579 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-import threading
-import warnings
-from io import BytesIO
-
-from azure.core.exceptions import HttpResponseError
-from azure.core.tracing.common import with_current_context
-from ._shared.encryption import decrypt_blob
-from ._shared.request_handlers import validate_and_format_range_headers
-from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
-from ._deserialize import get_page_ranges_result
-
-
-def process_range_and_offset(start_range, end_range, length, encryption):
-    start_offset, end_offset = 0, 0
-    if encryption.get("key") is not None or encryption.get("resolver") is not None:
-        if start_range is not None:
-            # Align the start of the range along a 16 byte block
-            start_offset = start_range % 16
-            start_range -= start_offset
-
-            # Include an extra 16 bytes for the IV if necessary
-            # Because of the previous offsetting, start_range will always
-            # be a multiple of 16.
-            if start_range > 0:
-                start_offset += 16
-                start_range -= 16
-
-        if length is not None:
-            # Align the end of the range along a 16 byte block
-            end_offset = 15 - (end_range % 16)
-            end_range += end_offset
-
-    return (start_range, end_range), (start_offset, end_offset)
-
-
-def process_content(data, start_offset, end_offset, encryption):
-    if data is None:
-        raise ValueError("Response cannot be None.")
-    try:
-        content = b"".join(list(data))
-    except Exception as error:
-        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
-    if content and encryption.get("key") is not None or encryption.get("resolver") is not None:
-        try:
-            return decrypt_blob(
-                encryption.get("required"),
-                encryption.get("key"),
-                encryption.get("resolver"),
-                content,
-                start_offset,
-                end_offset,
-                data.response.headers,
-            )
-        except Exception as error:
-            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
-    return content
-
-
-class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        client=None,
-        non_empty_ranges=None,
-        total_size=None,
-        chunk_size=None,
-        current_progress=None,
-        start_range=None,
-        end_range=None,
-        stream=None,
-        parallel=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs
-    ):
-        self.client = client
-        self.non_empty_ranges = non_empty_ranges
-
-        # Information on the download range/chunk size
-        self.chunk_size = chunk_size
-        self.total_size = total_size
-        self.start_index = start_range
-        self.end_index = end_range
-
-        # The destination that we will write to
-        self.stream = stream
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_lock = threading.Lock() if parallel else None
-
-        # For a parallel download, the stream is always seekable, so we note down the current position
-        # in order to seek to the right place when out-of-order chunks come in
-        self.stream_start = stream.tell() if parallel else None
-
-        # Download progress so far
-        self.progress_total = current_progress
-
-        # Encryption
-        self.encryption_options = encryption_options
-
-        # Parameters for each get operation
-        self.validate_content = validate_content
-        self.request_options = kwargs
-
-    def _calculate_range(self, chunk_start):
-        if chunk_start + self.chunk_size > self.end_index:
-            chunk_end = self.end_index
-        else:
-            chunk_end = chunk_start + self.chunk_size
-        return chunk_start, chunk_end
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.end_index:
-            yield index
-            index += self.chunk_size
-
-    def process_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def yield_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        return self._download_chunk(chunk_start, chunk_end - 1)
-
-    def _update_progress(self, length):
-        if self.progress_lock:
-            with self.progress_lock:  # pylint: disable=not-context-manager
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        if self.stream_lock:
-            with self.stream_lock:  # pylint: disable=not-context-manager
-                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-                self.stream.write(chunk_data)
-        else:
-            self.stream.write(chunk_data)
-
-    def _do_optimize(self, given_range_start, given_range_end):
-        # If we have no page range list stored, then assume there's data everywhere for that page blob
-        # or it's a block blob or append blob
-        if self.non_empty_ranges is None:
-            return False
-
-        for source_range in self.non_empty_ranges:
-            # Case 1: As the range list is sorted, if we've reached such a source_range
-            # we've checked all the appropriate source_range already and haven't found any overlapping.
-            # so the given range doesn't have any data and download optimization could be applied.
-            # given range:		|   |
-            # source range:			       |   |
-            if given_range_end < source_range['start']:  # pylint:disable=no-else-return
-                return True
-            # Case 2: the given range comes after source_range, continue checking.
-            # given range:				|   |
-            # source range:	|   |
-            elif source_range['end'] < given_range_start:
-                pass
-            # Case 3: source_range and given range overlap somehow, no need to optimize.
-            else:
-                return False
-        # Went through all src_ranges, but nothing overlapped. Optimization will be applied.
-        return True
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        download_range, offset = process_range_and_offset(
-            chunk_start, chunk_end, chunk_end, self.encryption_options
-        )
-
-        # If the requested range holds no data on the server, skip the download
-        # and synthesize a zero-filled chunk locally instead.
-        if self._do_optimize(download_range[0], download_range[1]):
-            chunk_data = b"\x00" * self.chunk_size
-        else:
-            range_header, range_validation = validate_and_format_range_headers(
-                download_range[0],
-                download_range[1],
-                check_content_md5=self.validate_content
-            )
-
-            try:
-                _, response = self.client.download(
-                    range=range_header,
-                    range_get_content_md5=range_validation,
-                    validate_content=self.validate_content,
-                    data_stream_total=self.total_size,
-                    download_stream_current=self.progress_total,
-                    **self.request_options
-                )
-            except HttpResponseError as error:
-                process_storage_error(error)
-
-            chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
-
-            # This makes sure that if_match is set so that we can validate
-            # that subsequent downloads are to an unmodified blob
-            if self.request_options.get("modified_access_conditions"):
-                self.request_options["modified_access_conditions"].if_match = response.properties.etag
-
-        return chunk_data
-
-
-class _ChunkIterator(object):
-    """Async iterator for chunks in blob download stream."""
-
-    def __init__(self, size, content, downloader):
-        self.size = size
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            chunk = next(self._iter_chunks)
-            self._current_content = self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-    next = __next__  # Python 2 compatibility.
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the blob being downloaded.
-    :ivar str container:
-        The name of the container where the blob is.
-    :ivar ~azure.storage.blob.BlobProperties properties:
-        The properties of the blob being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the blob.
-    """
-
-    def __init__(
-        self,
-        clients=None,
-        config=None,
-        start_range=None,
-        end_range=None,
-        validate_content=None,
-        encryption_options=None,
-        max_concurrency=1,
-        name=None,
-        container=None,
-        encoding=None,
-        **kwargs
-    ):
-        self.name = name
-        self.container = container
-        self.properties = None
-        self.size = None
-
-        self._clients = clients
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._non_empty_ranges = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
-        self._first_get_size = (
-            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
-        )
-        initial_request_start = self._start_range if self._start_range is not None else 0
-        if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
-            initial_request_end = self._end_range
-        else:
-            initial_request_end = initial_request_start + self._first_get_size - 1
-
-        self._initial_range, self._initial_offset = process_range_and_offset(
-            initial_request_start, initial_request_end, self._end_range, self._encryption_options
-        )
-
-        self._response = self._initial_request()
-        self.properties = self._response.properties
-        self.properties.name = self.name
-        self.properties.container = self.container
-
-        # Set the content length to the download size instead of the size of
-        # the last range
-        self.properties.size = self.size
-
-        # Overwrite the content range to the user requested range
-        self.properties.content_range = "bytes {0}-{1}/{2}".format(
-            self._start_range,
-            self._end_range,
-            self._file_size
-        )
-
-        # Overwrite the content MD5 as it is the MD5 for the last range instead
-        # of the stored MD5
-        # TODO: Set to the stored MD5 when the service returns this
-        self.properties.content_md5 = None
-
-        if self.size == 0:
-            self._current_content = b""
-        else:
-            self._current_content = process_content(
-                self._response,
-                self._initial_offset[0],
-                self._initial_offset[1],
-                self._encryption_options
-            )
-
-    def __len__(self):
-        return self.size
-
-    def _initial_request(self):
-        range_header, range_validation = validate_and_format_range_headers(
-            self._initial_range[0],
-            self._initial_range[1],
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=self._validate_content
-        )
-
-        try:
-            location_mode, response = self._clients.blob.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self._validate_content,
-                data_stream_total=None,
-                download_stream_current=0,
-                **self._request_options
-            )
-
-            # Check the location we read from to ensure we use the same one
-            # for subsequent requests.
-            self._location_mode = location_mode
-
-            # Parse the total file size and adjust the download size if ranges
-            # were specified
-            self._file_size = parse_length_from_content_range(response.properties.content_range)
-            if self._end_range is not None:
-                # Use the end range index unless it is over the end of the file
-                self.size = min(self._file_size, self._end_range - self._start_range + 1)
-            elif self._start_range is not None:
-                self.size = self._file_size - self._start_range
-            else:
-                self.size = self._file_size
-
-        except HttpResponseError as error:
-            if self._start_range is None and error.response.status_code == 416:
-                # Get range will fail on an empty file. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                try:
-                    _, response = self._clients.blob.download(
-                        validate_content=self._validate_content,
-                        data_stream_total=0,
-                        download_stream_current=0,
-                        **self._request_options
-                    )
-                except HttpResponseError as error:
-                    process_storage_error(error)
-
-                # Set the download size to empty
-                self.size = 0
-                self._file_size = 0
-            else:
-                process_storage_error(error)
-
-        # get page ranges to optimize downloading sparse page blob
-        if response.properties.blob_type == 'PageBlob':
-            try:
-                page_ranges = self._clients.page_blob.get_page_ranges()
-                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
-            # according to the REST API documentation:
-            # in a highly fragmented page blob with a large number of writes,
-            # a Get Page Ranges request can fail due to an internal server timeout.
-            # thus, if the page blob is not sparse, it's ok for it to fail
-            except HttpResponseError:
-                pass
-
-        # If the file is small, the download is complete at this point.
-        # If file size is large, download the rest of the file in chunks.
-        if response.properties.size != self.size:
-            # Lock on the etag. This can be overridden by the user by specifying '*'
-            if self._request_options.get("modified_access_conditions"):
-                if not self._request_options["modified_access_conditions"].if_match:
-                    self._request_options["modified_access_conditions"].if_match = response.properties.etag
-        else:
-            self._download_complete = True
-        return response
-
-    def chunks(self):
-        if self.size == 0 or self._download_complete:
-            iter_downloader = None
-        else:
-            data_end = self._file_size
-            if self._end_range is not None:
-                # Use the end range index unless it is over the end of the file
-                data_end = min(self._file_size, self._end_range + 1)
-            iter_downloader = _ChunkDownloader(
-                client=self._clients.blob,
-                non_empty_ranges=self._non_empty_ranges,
-                total_size=self.size,
-                chunk_size=self._config.max_chunk_get_size,
-                current_progress=self._first_get_size,
-                start_range=self._initial_range[1] + 1,  # start where the first download ended
-                end_range=data_end,
-                stream=None,
-                parallel=False,
-                validate_content=self._validate_content,
-                encryption_options=self._encryption_options,
-                use_location=self._location_mode,
-                **self._request_options
-            )
-        return _ChunkIterator(
-            size=self.size,
-            content=self._current_content,
-            downloader=iter_downloader)
-
-    def readall(self):
-        """Download the contents of this blob.
-
-        This operation is blocking until all data is downloaded.
-        :rtype: bytes or str
-        """
-        stream = BytesIO()
-        self.readinto(stream)
-        data = stream.getvalue()
-        if self._encoding:
-            return data.decode(self._encoding)
-        return data
-
-    def content_as_bytes(self, max_concurrency=1):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :rtype: bytes
-        """
-        warnings.warn(
-            "content_as_bytes is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        return self.readall()
-
-    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
-        """Download the contents of this blob, and decode as text.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :param str encoding:
-            Text encoding to decode the downloaded bytes. Default is UTF-8.
-        :rtype: str
-        """
-        warnings.warn(
-            "content_as_text is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        return self.readall()
-
-    def readinto(self, stream):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        # The stream must be seekable if parallel download is required
-        parallel = self._max_concurrency > 1
-        if parallel:
-            error_message = "Target stream handle must be seekable."
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(error_message)
-
-            try:
-                stream.seek(stream.tell())
-            except (NotImplementedError, AttributeError):
-                raise ValueError(error_message)
-
-        # Write the content to the user stream
-        stream.write(self._current_content)
-        if self._download_complete:
-            return self.size
-
-        data_end = self._file_size
-        if self._end_range is not None:
-            # Use the length unless it is over the end of the file
-            data_end = min(self._file_size, self._end_range + 1)
-
-        downloader = _ChunkDownloader(
-            client=self._clients.blob,
-            non_empty_ranges=self._non_empty_ranges,
-            total_size=self.size,
-            chunk_size=self._config.max_chunk_get_size,
-            current_progress=self._first_get_size,
-            start_range=self._initial_range[1] + 1,  # Start where the first download ended
-            end_range=data_end,
-            stream=stream,
-            parallel=parallel,
-            validate_content=self._validate_content,
-            encryption_options=self._encryption_options,
-            use_location=self._location_mode,
-            **self._request_options
-        )
-        if parallel:
-            import concurrent.futures
-            executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency)
-            list(executor.map(
-                    with_current_context(downloader.process_chunk),
-                    downloader.get_chunk_offsets()
-                ))
-        else:
-            for chunk in downloader.get_chunk_offsets():
-                downloader.process_chunk(chunk)
-        return self.size
-
-    def download_to_stream(self, stream, max_concurrency=1):
-        """Download the contents of this blob to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The properties of the downloaded blob.
-        :rtype: Any
-        """
-        warnings.warn(
-            "download_to_stream is deprecated, use readinto instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self.readinto(stream)
-        return self.properties
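One subtlety of the removed downloader worth spelling out is the 16-byte block alignment in process_range_and_offset. A worked sketch, assuming the function above and a dummy encryption dict (any non-None "key" triggers the alignment path):

encryption = {"key": object(), "resolver": None, "required": True}

# Caller wants decrypted bytes 20-39. The request is widened to whole
# AES blocks plus one extra leading block that serves as the CBC IV.
(start, end), (front, back) = process_range_and_offset(20, 39, 40, encryption)
assert (start, end) == (0, 47)   # download blocks 0-2, including the IV block
assert (front, back) == (20, 8)  # offsets decrypt_blob trims after decryption

The downloader is then driven through readinto(), which fans chunk offsets out to a thread pool when max_concurrency > 1 and therefore requires a seekable target stream.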
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_blob_storage import AzureBlobStorage
-__all__ = ['AzureBlobStorage']
-
-from .version import VERSION
-
-__version__ = VERSION
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_azure_blob_storage.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_azure_blob_storage.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_azure_blob_storage.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_azure_blob_storage.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,83 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import PipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration import AzureBlobStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations import ServiceOperations
-from .operations import ContainerOperations
-from .operations import DirectoryOperations
-from .operations import BlobOperations
-from .operations import PageBlobOperations
-from .operations import AppendBlobOperations
-from .operations import BlockBlobOperations
-from . import models
-
-
-class AzureBlobStorage(object):
-    """AzureBlobStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.blob.operations.ServiceOperations
-    :ivar container: Container operations
-    :vartype container: azure.storage.blob.operations.ContainerOperations
-    :ivar directory: Directory operations
-    :vartype directory: azure.storage.blob.operations.DirectoryOperations
-    :ivar blob: Blob operations
-    :vartype blob: azure.storage.blob.operations.BlobOperations
-    :ivar page_blob: PageBlob operations
-    :vartype page_blob: azure.storage.blob.operations.PageBlobOperations
-    :ivar append_blob: AppendBlob operations
-    :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations
-    :ivar block_blob: BlockBlob operations
-    :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureBlobStorageConfiguration(url, **kwargs)
-        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2019-07-07'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.container = ContainerOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.directory = DirectoryOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.blob = BlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.page_blob = PageBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.append_blob = AppendBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.block_blob = BlockBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    def close(self):
-        self._client.close()
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-    def __exit__(self, *exc_details):
-        self._client.__exit__(*exc_details)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_configuration.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_configuration.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_configuration.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/_configuration.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from .version import VERSION
-
-
-class AzureBlobStorageConfiguration(Configuration):
-    """Configuration for AzureBlobStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-
-        self.url = url
-        self.version = "2019-07-07"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
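Because _configure only builds a default policy when the matching kwarg is absent, callers could swap in their own pipeline policies. A minimal sketch, with illustrative retry settings:

from azure.core.pipeline import policies

# retry_policy here replaces the default RetryPolicy assembled in _configure;
# any policy kwarg left unset falls back to its azure-core default.
config = AzureBlobStorageConfiguration(
    "https://myaccount.blob.core.windows.net",
    retry_policy=policies.RetryPolicy(retry_total=3, retry_backoff_factor=0.5),
)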
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_blob_storage_async import AzureBlobStorage
-__all__ = ['AzureBlobStorage']
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_azure_blob_storage_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_azure_blob_storage_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_azure_blob_storage_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_azure_blob_storage_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import AsyncPipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration_async import AzureBlobStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations_async import ServiceOperations
-from .operations_async import ContainerOperations
-from .operations_async import DirectoryOperations
-from .operations_async import BlobOperations
-from .operations_async import PageBlobOperations
-from .operations_async import AppendBlobOperations
-from .operations_async import BlockBlobOperations
-from .. import models
-
-
-class AzureBlobStorage(object):
-    """AzureBlobStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations
-    :ivar container: Container operations
-    :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations
-    :ivar directory: Directory operations
-    :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations
-    :ivar blob: Blob operations
-    :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations
-    :ivar page_blob: PageBlob operations
-    :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations
-    :ivar append_blob: AppendBlob operations
-    :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations
-    :ivar block_blob: BlockBlob operations
-    :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(
-            self, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureBlobStorageConfiguration(url, **kwargs)
-        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2019-07-07'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.container = ContainerOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.directory = DirectoryOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.blob = BlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.page_blob = PageBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.append_blob = AppendBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.block_blob = BlockBlobOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    async def close(self):
-        await self._client.close()
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-    async def __aexit__(self, *exc_details):
-        await self._client.__aexit__(*exc_details)
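The async client above mirrors the sync one but must be closed via the async protocol. A minimal usage sketch, with a placeholder URL; service.get_properties is a generated operation:

import asyncio
from azure.multiapi.storagev2.blob.v2019_07_07._generated.aio import AzureBlobStorage

async def main():
    # __aenter__/__aexit__ delegate to AsyncPipelineClient, releasing the
    # async transport session deterministically on exit.
    async with AzureBlobStorage("https://myaccount.blob.core.windows.net") as client:
        await client.service.get_properties()  # generated Service Get Properties call

asyncio.run(main())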
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_configuration_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_configuration_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_configuration_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/_configuration_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,53 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from ..version import VERSION
-
-
-class AzureBlobStorageConfiguration(Configuration):
-    """Configuration for AzureBlobStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-        self.accept_language = None
-
-        self.url = url
-        self.version = "2019-07-07"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,28 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations_async import ServiceOperations
-from ._container_operations_async import ContainerOperations
-from ._directory_operations_async import DirectoryOperations
-from ._blob_operations_async import BlobOperations
-from ._page_blob_operations_async import PageBlobOperations
-from ._append_blob_operations_async import AppendBlobOperations
-from ._block_blob_operations_async import BlockBlobOperations
-
-__all__ = [
-    'ServiceOperations',
-    'ContainerOperations',
-    'DirectoryOperations',
-    'BlobOperations',
-    'PageBlobOperations',
-    'AppendBlobOperations',
-    'BlockBlobOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_append_blob_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_append_blob_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_append_blob_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_append_blob_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,563 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class AppendBlobOperations:
-    """AppendBlobOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
-    :ivar comp: Constant value: "appendblock".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "AppendBlob"
-        self.comp = "appendblock"
-
-    async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Create Append Blob operation creates a new append blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}/{blob}'}
-
-    async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Append Block operation commits a new block of data to the end of an
-        existing append blob. The Append Block operation is permitted only if
-        the blob was created with x-ms-blob-type set to AppendBlob. Append
-        Block is supported only in version 2015-02-21 or later.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param append_position_access_conditions: Additional parameters for
-         the operation
-        :type append_position_access_conditions:
-         ~azure.storage.blob.models.AppendPositionAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        max_size = None
-        if append_position_access_conditions is not None:
-            max_size = append_position_access_conditions.max_size
-        append_position = None
-        if append_position_access_conditions is not None:
-            append_position = append_position_access_conditions.append_position
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.append_block.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if max_size is not None:
-            header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
-        if append_position is not None:
-            header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    append_block.metadata = {'url': '/{containerName}/{blob}'}
-
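append_block() above serializes the block body plus the optional transactional MD5/CRC64, lease, CPK, and append-position headers. A hedged sketch of invoking it through the same public client surface (names and data are illustrative):

from azure.storage.blob.aio import BlobClient

async def append_example():
    # Placeholder names; data and length map to the request body and the
    # Content-Length header built in append_block() above.
    blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "log.txt")
    async with blob:
        data = b"one line of telemetry\n"
        await blob.append_block(data, length=len(data))
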
-    async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Append Block operation commits a new block of data to the end of an
-        existing append blob where the contents are read from a source url. The
-        Append Block operation is permitted only if the blob was created with
-        x-ms-blob-type set to AppendBlob. Append Block is supported only on
-        version 2015-02-21 or later.
-
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param append_position_access_conditions: Additional parameters for
-         the operation
-        :type append_position_access_conditions:
-         ~azure.storage.blob.models.AppendPositionAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        max_size = None
-        if append_position_access_conditions is not None:
-            max_size = append_position_access_conditions.max_size
-        append_position = None
-        if append_position_access_conditions is not None:
-            append_position = append_position_access_conditions.append_position
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.append_block_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if max_size is not None:
-            header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
-        if append_position is not None:
-            header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
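append_block_from_url() differs only in that the service itself reads the block from source_url, so the request carries x-ms-copy-source and x-ms-source-range instead of a body. A sketch under the same assumptions (the source URL, SAS token, offset, and length are illustrative placeholders):

from azure.storage.blob.aio import BlobClient

async def append_from_url_example():
    # Placeholder destination and source; source_offset/source_length select
    # the byte range the service reads (the x-ms-source-range header above).
    dest = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "combined.log")
    async with dest:
        await dest.append_block_from_url(
            "https://<account>.blob.core.windows.net/src/part1.log?<sas>",
            source_offset=0,
            source_length=512,
        )
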
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_blob_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_blob_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_blob_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_blob_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2443 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class BlobOperations:
-    """BlobOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance, which will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_requires_sync: Constant value: "true".
-    :ivar x_ms_copy_action: Constant value: "abort".
-    :ivar restype: Constant value: "account".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_requires_sync = "true"
-        self.x_ms_copy_action = "abort"
-        self.restype = "account"
-
-    async def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Download operation reads or downloads a blob from the system,
-        including its metadata and properties. You can also call Download to
-        read a snapshot.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Return only the bytes of the blob in the specified
-         range.
-        :type range: str
-        :param range_get_content_md5: When set to true and specified together
-         with the Range, the service returns the MD5 hash for the range, as
-         long as the range is less than or equal to 4 MB in size.
-        :type range_get_content_md5: bool
-        :param range_get_content_crc64: When set to true and specified
-         together with the Range, the service returns the CRC64 hash for the
-         range, as long as the range is less than or equal to 4 MB in size.
-        :type range_get_content_crc64: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.download.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
-        if range_get_content_crc64 is not None:
-            header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            await response.load_body()
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    download.metadata = {'url': '/{containerName}/{blob}'}
-
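download() above runs the pipeline with stream=True and hands back stream_download(), so the body is consumed lazily rather than buffered. A minimal sketch of the equivalent public call (placeholder names):

from azure.storage.blob.aio import BlobClient

async def download_example():
    # Placeholder names; readall() drains the stream that the generated
    # download() operation returns.
    blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "log.txt")
    async with blob:
        stream = await blob.download_blob()
        data = await stream.readall()
        return data
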
-    async def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Get Properties operation returns all user-defined metadata,
-        standard HTTP properties, and system properties for the blob. It does
-        not return the content of the blob.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')),
-                'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')),
-                'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')),
-                'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')),
-                'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{containerName}/{blob}'}
-
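get_properties() issues a HEAD request, so everything it returns arrives in the response headers deserialized above. A sketch, again assuming the public aio client with placeholder names:

from azure.storage.blob.aio import BlobClient

async def properties_example():
    # Placeholder names; each attribute here is populated from the HEAD
    # response headers listed above (ETag, x-ms-blob-type, Content-Length).
    blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "log.txt")
    async with blob:
        props = await blob.get_blob_properties()
        print(props.etag, props.blob_type, props.size)
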
-    async def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """If the storage account's soft delete feature is disabled then, when a
-        blob is deleted, it is permanently removed from the storage account. If
-        the storage account's soft delete feature is enabled, then, when a blob
-        is deleted, it is marked for deletion and becomes inaccessible
-        immediately. However, the blob service retains the blob or snapshot for
-        the number of days specified by the DeleteRetentionPolicy section of
-        [Storage service properties](Set-Blob-Service-Properties.md). After
-        the specified number of days has passed, the blob's data is permanently
-        removed from the storage account. Note that you continue to be charged
-        for the soft-deleted blob's storage until it is permanently removed.
-        Use the List Blobs API and specify the "include=deleted" query
-        parameter to discover which blobs and snapshots have been soft deleted.
-        You can then use the Undelete Blob API to restore a soft-deleted blob.
-        All other operations on a soft-deleted blob or snapshot cause the
-        service to return an HTTP status code of 404 (ResourceNotFound).
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param delete_snapshots: Required if the blob has associated
-         snapshots. Specify one of the following two options: include: Delete
-         the base blob and all of its snapshots. only: Delete only the blob's
-         snapshots and not the blob itself. Possible values include: 'include',
-         'only'
-        :type delete_snapshots: str or
-         ~azure.storage.blob.models.DeleteSnapshotsOptionType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if delete_snapshots is not None:
-            header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{containerName}/{blob}'}
-
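-    # Editorial sketch (not generated code): one way a caller might invoke
-    # delete() above, assuming an already-constructed operations instance
-    # `blob_ops`; the ETag value is a hypothetical placeholder.
-    #
-    #   conditions = models.ModifiedAccessConditions(if_match='"0x8D4BCC2E"')
-    #   await blob_ops.delete(delete_snapshots='include',
-    #                         modified_access_conditions=conditions)
-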
-    async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_acl: Sets POSIX access control rights on files and
-         directories. The value is a comma-separated list of access control
-         entries. Each access control entry (ACE) consists of a scope, a type,
-         a user or group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type posix_acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
-
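-    # Editorial sketch (not generated code): set_access_control() only works
-    # on accounts with Hierarchical Namespace enabled. Assuming an instance
-    # `blob_ops`, permissions may be symbolic or 4-digit octal:
-    #
-    #   await blob_ops.set_access_control(owner='$superuser',
-    #                                     posix_permissions='rwxr-x---')
-    #   await blob_ops.set_access_control(posix_permissions='0750')  # same bits
-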
-    async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Get the owner, group, permissions, or access control list for a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the identity values returned in
-         the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "getAccessControl"
-
-        # Construct URL
-        url = self.get_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    get_access_control.metadata = {'url': '/{filesystem}/{path}'}
-
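-    # Editorial sketch (not generated code): reading the ACL back. With
-    # upn=True the x-ms-owner/x-ms-group/x-ms-acl headers carry User Principal
-    # Names rather than AAD object IDs. The `cls` callback below receives
-    # (response, deserialized body, response_headers); `blob_ops` is assumed.
-    #
-    #   hdrs = await blob_ops.get_access_control(
-    #       upn=True, cls=lambda resp, body, headers: headers)
-    #   acl = hdrs['x-ms-acl']  # e.g. "user::rwx,group::r-x,other::---"
-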
-    async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """Rename a blob/file.  By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param rename_source: The file or directory to be renamed. The value
-         must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved.
-        :type rename_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_rename_mode: Determines the behavior of the rename
-         operation. Possible values include: 'legacy', 'posix'
-        :type path_rename_mode: str or
-         ~azure.storage.blob.models.PathRenameMode
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts the permission settings for the
-         file or directory, and is applied only when a default ACL does not
-         exist in the parent directory. If a umask bit is set, the
-         corresponding permission is disabled; otherwise that permission is
-         determined by posix_permissions. A 4-digit octal notation (e.g.
-         0022) is supported. If no umask is specified, a default umask of
-         0027 is used.
-        :type posix_umask: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.rename.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if path_rename_mode is not None:
-            query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    rename.metadata = {'url': '/{filesystem}/{path}'}
-
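-    # Editorial sketch (not generated code): rename() overwrites the
-    # destination by default; to fail when the destination already exists,
-    # send If-None-Match: "*" via ModifiedAccessConditions, as the docstring
-    # above notes. `blob_ops` and the paths are assumed placeholders.
-    #
-    #   no_overwrite = models.ModifiedAccessConditions(if_none_match='*')
-    #   await blob_ops.rename('/myfilesystem/olddir/file.txt',
-    #                         modified_access_conditions=no_overwrite)
-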
-    async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Undelete a blob that was previously soft deleted.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "undelete"
-
-        # Construct URL
-        url = self.undelete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    undelete.metadata = {'url': '/{containerName}/{blob}'}
-
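-    # Editorial sketch (not generated code): undelete() takes no conditional
-    # headers; it restores soft-deleted data when the account's delete
-    # retention policy is enabled. Assuming an instance `blob_ops`:
-    #
-    #   await blob_ops.undelete(timeout=30)
-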
-    async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Set HTTP Headers operation sets system properties on the blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_http_headers.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
-
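-    # Editorial sketch (not generated code): the system properties set by
-    # set_http_headers() travel in a single BlobHTTPHeaders model rather than
-    # as separate keyword arguments. `blob_ops` is an assumed instance.
-    #
-    #   headers = models.BlobHTTPHeaders(
-    #       blob_content_type='application/json',
-    #       blob_cache_control='max-age=3600')
-    #   await blob_ops.set_http_headers(blob_http_headers=headers)
-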
-    async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Set Blob Metadata operation sets user-defined metadata for the
-        specified blob as one or more name-value pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies user-defined name-value pairs
-         associated with the blob. If no name-value pairs are specified, the
-         operation removes all existing metadata from the blob. Note that
-         beginning with version 2009-09-19, metadata names must adhere to the
-         naming rules for C# identifiers. See Naming and Referencing
-         Containers, Blobs, and Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{containerName}/{blob}'}
-
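-    # Editorial sketch (not generated code): at this layer `metadata` is a
-    # single string serialized into the 'x-ms-meta' header; the higher-level
-    # client typically expands a dict into per-key x-ms-meta-* headers before
-    # calling in. A customer-provided key can be attached via CpkInfo; the
-    # values below are hypothetical placeholders.
-    #
-    #   cpk = models.CpkInfo(encryption_key='<base64-key>',
-    #                        encryption_key_sha256='<base64-sha256>',
-    #                        encryption_algorithm='AES256')
-    #   await blob_ops.set_metadata(cpk_info=cpk)
-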
-    async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
-
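-    # Editorial sketch (not generated code): duration=-1 requests a lease that
-    # never expires; finite leases must be 15-60 seconds, per the docstring
-    # above. The x-ms-lease-id response header is what subsequent lease calls
-    # must echo back. `blob_ops` is an assumed instance.
-    #
-    #   hdrs = await blob_ops.acquire_lease(
-    #       duration=-1, cls=lambda resp, body, headers: headers)
-    #   lease_id = hdrs['x-ms-lease-id']
-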
-    async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{containerName}/{blob}'}
-
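-    # Editorial sketch (not generated code): release_lease() takes the active
-    # lease ID as a required positional argument, unlike acquire_lease() where
-    # an ID is only proposed. Continuing the hypothetical `lease_id` from the
-    # acquire sketch above:
-    #
-    #   await blob_ops.release_lease(lease_id)
-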
-    async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "renew"
-
-        # Construct URL
-        url = self.renew_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    renew_lease.metadata = {'url': '/{containerName}/{blob}'}
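Renewing re-establishes the lock under the same lease ID. Because the generated methods return None unless a `cls` callback is supplied, a sketch that surfaces the response headers (the three-argument lambda matches the `cls(response, None, response_headers)` call above) might look like this; `blob_ops` is again an assumption:

    # Hedged sketch: return the raw response headers from renew_lease.
    async def renew(blob_ops, lease_id: str) -> dict:
        return await blob_ops.renew_lease(
            lease_id=lease_id,
            cls=lambda resp, result, headers: headers)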
-
-    async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{containerName}/{blob}'}
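As the docstring notes, the proposed lease ID must be a valid GUID string or the service answers 400. A hedged sketch that generates one with the standard library (`blob_ops` is assumed):

    import uuid

    # Hedged sketch: rotate a lease to a freshly generated GUID.
    async def change(blob_ops, current_lease_id: str) -> str:
        proposed = str(uuid.uuid4())
        await blob_ops.change_lease(lease_id=current_lease_id,
                                    proposed_lease_id=proposed)
        return proposed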
-
-    async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param break_period: For a break operation, proposed duration the
-         lease should continue before it is broken, in seconds, between 0 and
-         60. This break period is only used if it is shorter than the time
-         remaining on the lease. If longer, the time remaining on the lease is
-         used. A new lease will not be available before the break period has
-         expired, but the lease may be held for longer than the break period.
-         If this header does not appear with a break operation, a
-         fixed-duration lease breaks after the remaining lease period elapses,
-         and an infinite lease breaks immediately.
-        :type break_period: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{containerName}/{blob}'}
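Break is the one lease action that takes no lease ID and answers 202 rather than 200; the time left before the lease is fully broken comes back in the `x-ms-lease-time` header. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: break the lease and return the remaining break
    # period, in seconds, from the x-ms-lease-time response header.
    async def break_lease_hint(blob_ops) -> int:
        headers = await blob_ops.break_lease(
            break_period=10,
            cls=lambda resp, result, headers: headers)
        return headers['x-ms-lease-time']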
-
-    async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Create Snapshot operation creates a read-only snapshot of a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "snapshot"
-
-        # Construct URL
-        url = self.create_snapshot.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_snapshot.metadata = {'url': '/{containerName}/{blob}'}
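The snapshot's identity is an opaque DateTime value returned only in the `x-ms-snapshot` response header, so the `cls` hook is the way to retrieve it from this generated method. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: create a read-only snapshot and return its
    # identifier from the x-ms-snapshot header.
    async def snapshot(blob_ops) -> str:
        headers = await blob_ops.create_snapshot(
            cls=lambda resp, result, headers: headers)
        return headers['x-ms-snapshot']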
-
-    async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Start Copy From URL operation copies a blob or an internet resource
-        to a new blob.
-
-        :param copy_source: Specifies the URL of the source blob, file, or
-         internet resource to copy. This value is a URL of up to 2 KB in
-         length. The value should be URL-encoded as it would appear in a
-         request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param rehydrate_priority: Optional. Indicates the priority with which
-         to rehydrate an archived blob. Possible values include: 'High',
-         'Standard'
-        :type rehydrate_priority: str or
-         ~azure.storage.blob.models.RehydratePriority
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.start_copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if rehydrate_priority is not None:
-            header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
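Start Copy From URL is asynchronous: the 202 response carries an `x-ms-copy-id` (later accepted by abort_copy_from_url) and an `x-ms-copy-status` that may still read pending. A hedged sketch (`blob_ops` and the source URL are assumptions):

    # Hedged sketch: kick off a server-side copy and return its copy ID.
    async def start_copy(blob_ops, source_url: str) -> str:
        headers = await blob_ops.start_copy_from_url(
            copy_source=source_url,
            cls=lambda resp, result, headers: headers)
        return headers['x-ms-copy-id']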
-
-    async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Copy From URL operation copies a blob or an internet resource to a
-        new blob. It will not return a response until the copy is complete.
-
-        :param copy_source: Specifies the URL of the source blob, file, or
-         internet resource to copy. This value is a URL of up to 2 KB in
-         length. The value should be URL-encoded as it would appear in a
-         request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
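Unlike start_copy_from_url, this variant pins `x-ms-requires-sync` and, per its docstring, does not return until the copy is complete, so no polling or abort handling is needed afterwards. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: synchronous copy; once the 202 response arrives
    # the copy has already finished, so there is nothing to poll.
    async def sync_copy(blob_ops, source_url: str) -> None:
        await blob_ops.copy_from_url(copy_source=source_url, timeout=60)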
-
-    async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Abort Copy From URL operation aborts a pending Copy From URL
-        operation, and leaves a destination blob with zero length and full
-        metadata.
-
-        :param copy_id: The copy identifier provided in the x-ms-copy-id
-         header of the original Copy Blob operation.
-        :type copy_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "copy"
-
-        # Construct URL
-        url = self.abort_copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
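Abort pairs with the ID handed back by start_copy_from_url; per the docstring, the destination blob is left zero-length with full metadata. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: cancel a pending asynchronous copy by its copy ID.
    async def cancel_copy(blob_ops, copy_id: str) -> None:
        await blob_ops.abort_copy_from_url(copy_id=copy_id)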
-
-    async def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Set Tier operation sets the tier on a blob. The operation is
-        allowed on a page blob in a premium storage account and on a block blob
-        in a blob storage account (locally redundant storage only). A premium
-        page blob's tier determines the allowed size, IOPS, and bandwidth of
-        the blob. A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param tier: Indicates the tier to be set on the blob. Possible values
-         include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60',
-         'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierRequired
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param rehydrate_priority: Optional. Indicates the priority with which
-         to rehydrate an archived blob. Possible values include: 'High',
-         'Standard'
-        :type rehydrate_priority: str or
-         ~azure.storage.blob.models.RehydratePriority
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "tier"
-
-        # Construct URL
-        url = self.set_tier.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if rehydrate_priority is not None:
-            header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_tier.metadata = {'url': '/{containerName}/{blob}'}
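set_tier accepts both 200 and 202 above: 200 when the tier change is immediate, 202 when the transition (typically rehydration out of Archive) is still pending. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: request rehydration of an archived blob to Hot;
    # a 202 status means the transition is pending, not instantaneous.
    async def rehydrate(blob_ops) -> None:
        await blob_ops.set_tier(tier='Hot', rehydrate_priority='High')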
-
-    async def get_account_info(self, *, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/{containerName}/{blob}'}
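get_account_info likewise carries no body; the SKU name and account kind arrive purely as response headers. A hedged sketch (`blob_ops` assumed):

    # Hedged sketch: read the storage account's SKU and kind from the
    # x-ms-sku-name and x-ms-account-kind response headers.
    async def account_info(blob_ops) -> tuple:
        headers = await blob_ops.get_account_info(
            cls=lambda resp, result, headers: headers)
        return headers['x-ms-sku-name'], headers['x-ms-account-kind']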
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_block_blob_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_block_blob_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_block_blob_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_block_blob_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,802 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class BlockBlobOperations:
-    """BlockBlobOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "BlockBlob"
-
-    async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Upload Block Blob operation updates the content of an existing
-        block blob. Updating an existing block blob overwrites any existing
-        metadata on the blob. Partial updates are not supported with Put Blob;
-        the content of the existing blob is overwritten with the content of the
-        new blob. To perform a partial update of the content of a block blob,
-        use the Put Block List operation.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.upload.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload.metadata = {'url': '/{containerName}/{blob}'}
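Editor's note: on the wire, the upload method above is a single Put Blob request — a PUT to the blob URL carrying x-ms-blob-type: BlockBlob with the body streamed as application/octet-stream, succeeding with 201 Created. A minimal aiohttp sketch of the equivalent request, assuming a SAS-authenticated blob URL (blob_url_with_sas is a placeholder):

    import aiohttp

    async def put_block_blob(blob_url_with_sas, data):
        headers = {
            'x-ms-blob-type': 'BlockBlob',    # constant, as set from self.x_ms_blob_type
            'x-ms-version': '2019-07-07',     # the API version this module targets
            'Content-Type': 'application/octet-stream',
        }
        async with aiohttp.ClientSession() as session:
            async with session.put(blob_url_with_sas, data=data, headers=headers) as resp:
                # The generated code raises StorageErrorException for anything but 201.
                if resp.status != 201:
                    raise RuntimeError(await resp.text())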
-
-    async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, *, cls=None, **kwargs):
-        """The Stage Block operation creates a new block to be committed as part
-        of a blob.
-
-        :param block_id: A valid Base64 string value that identifies the
-         block. Prior to encoding, the string must be less than or equal to 64
-         bytes in size. For a given blob, the length of the value specified for
-         the blockid parameter must be the same for each block.
-        :type block_id: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param body: Initial data
-        :type body: Generator
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-
-        comp = "block"
-
-        # Construct URL
-        url = self.stage_block.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    stage_block.metadata = {'url': '/{containerName}/{blob}'}
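Editor's note: stage_block maps to the Put Block REST call — a PUT with comp=block and a blockid query parameter. As the docstring requires, block IDs must be Base64 strings of equal encoded length for a given blob; a common way to satisfy that (a sketch, not code from this package) is to encode a zero-padded counter:

    import base64

    def make_block_id(index):
        # Zero-padding keeps every pre-encoding string the same length,
        # so every encoded block ID has the same length too.
        return base64.b64encode(f'{index:06d}'.encode('ascii')).decode('ascii')

    # e.g. make_block_id(0) == 'MDAwMDAw'; the ID is then sent as
    # PUT <blob-url>?comp=block&blockid=MDAwMDAw with the chunk as the body.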
-
-    async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Stage Block operation creates a new block to be committed as part
-        of a blob where the contents are read from a URL.
-
-        :param block_id: A valid Base64 string value that identifies the
-         block. Prior to encoding, the string must be less than or equal to 64
-         bytes in size. For a given blob, the length of the value specified for
-         the blockid parameter must be the same for each block.
-        :type block_id: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        comp = "block"
-
-        # Construct URL
-        url = self.stage_block_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
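Editor's note: stage_block_from_url builds the same comp=block PUT, but sends no body; the x-ms-copy-source header (plus an optional x-ms-source-range) tells the service to read the block contents server-side. A sketch of the distinguishing headers, with placeholder values:

    # Headers that replace the request body in Put Block From URL; the source
    # URL below is a placeholder and must be readable by the service (e.g. a
    # public blob, or one carrying its own SAS token).
    headers = {
        'x-ms-copy-source': 'https://source-account.blob.core.windows.net/src/blob',
        'x-ms-source-range': 'bytes=0-1048575',  # optional: first 1 MiB of the source
        'x-ms-version': '2019-07-07',
        'Content-Length': '0',                   # no body is sent
    }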
-
-    async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Commit Block List operation writes a blob by specifying the list of
-        block IDs that make up the blob. In order to be written as part of a
-        blob, a block must have been successfully written to the server in a
-        prior Put Block operation. You can call Put Block List to update a blob
-        by uploading only those blocks that have changed, then committing the
-        new and existing blocks together. You can do this by specifying whether
-        to commit a block from the committed block list or from the uncommitted
-        block list, or to commit the most recently uploaded version of the
-        block, whichever list it may belong to.
-
-        :param blocks: The lookup list of Base64 block IDs to commit.
-        :type blocks: ~azure.storage.blob.models.BlockLookupList
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "blocklist"
-
-        # Construct URL
-        url = self.commit_block_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(blocks, 'BlockLookupList')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    commit_block_list.metadata = {'url': '/{containerName}/{blob}'}
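Editor's note: the blocks argument is serialized as a BlockLookupList, which becomes the XML body of the Put Block List request (Content-Type application/xml, per the header set above). A sketch of building that body by hand, using <Latest> to commit the most recently uploaded version of each block:

    from xml.sax.saxutils import escape

    def block_list_body(block_ids):
        # Each ID may instead appear under <Committed> or <Uncommitted> to pin
        # which copy of the block is committed; <Latest> picks whichever copy
        # was uploaded most recently.
        latest = ''.join(f'<Latest>{escape(bid)}</Latest>' for bid in block_ids)
        return f'<?xml version="1.0" encoding="utf-8"?><BlockList>{latest}</BlockList>'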
-
-    async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """The Get Block List operation retrieves the list of blocks that have
-        been uploaded as part of a block blob.
-
-        :param list_type: Specifies whether to return the list of committed
-         blocks, the list of uncommitted blocks, or both lists together.
-         Possible values include: 'committed', 'uncommitted', 'all'
-        :type list_type: str or ~azure.storage.blob.models.BlockListType
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: BlockList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.BlockList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "blocklist"
-
-        # Construct URL
-        url = self.get_block_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('BlockList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_block_list.metadata = {'url': '/{containerName}/{blob}'}
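Editor's note: get_block_list is the one operation in this deleted file that returns a deserialized body — the service answers a GET with comp=blocklist (and blocklisttype committed/uncommitted/all) with an XML document. A sketch of parsing that payload directly, assuming the documented <BlockList><CommittedBlocks><Block><Name>/<Size> shape:

    import xml.etree.ElementTree as ET

    def parse_block_list(xml_payload):
        root = ET.fromstring(xml_payload)

        def blocks(section):
            # Each <Block> carries the Base64 <Name> and the byte <Size>.
            return [(b.findtext('Name'), int(b.findtext('Size')))
                    for b in root.findall(f'{section}/Block')]

        return {
            'committed': blocks('CommittedBlocks'),
            'uncommitted': blocks('UncommittedBlocks'),
        }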
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_container_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_container_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_container_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_container_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1327 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ContainerOperations:
-    """ContainerOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, *, cls=None, **kwargs):
-        """creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param access: Specifies whether data in the container may be accessed
-         publicly and the level of access. Possible values include:
-         'container', 'blob'
-        :type access: str or ~azure.storage.blob.models.PublicAccessType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param container_cpk_scope_info: Additional parameters for the
-         operation
-        :type container_cpk_scope_info:
-         ~azure.storage.blob.models.ContainerCpkScopeInfo
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        default_encryption_scope = None
-        if container_cpk_scope_info is not None:
-            default_encryption_scope = container_cpk_scope_info.default_encryption_scope
-        prevent_encryption_scope_override = None
-        if container_cpk_scope_info is not None:
-            prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
-
-        restype = "container"
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if access is not None:
-            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if default_encryption_scope is not None:
-            header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str')
-        if prevent_encryption_scope_override is not None:
-            header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}'}
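Editor's note: create maps to a bare PUT on the container URL with restype=container; there is no body, and 201 means the container was created. As the docstring says, the operation fails when the name is taken — the service answers 409 with error code ContainerAlreadyExists, which the generated code surfaces through map_error/StorageErrorException. A requests-based sketch (synchronous for brevity; container_url_with_sas is a placeholder):

    import requests

    def create_container(container_url_with_sas):
        resp = requests.put(
            container_url_with_sas,
            params={'restype': 'container'},
            headers={'x-ms-version': '2019-07-07'},
        )
        if resp.status_code == 201:
            return True
        if resp.status_code == 409:
            # ContainerAlreadyExists: the documented failure mode here.
            return False
        resp.raise_for_status()
        return False  # unexpected success code other than 201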
-
-    async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """returns all user-defined metadata and system properties for the
-        specified container. The data returned does not include the container's
-        list of blobs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        restype = "container"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
-                'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')),
-                'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')),
-                'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')),
-                'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{containerName}'}
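Because the operation itself returns None, callers recover the properties through the `cls` callback, which receives the raw response, the deserialized body (None here), and the parsed header dict built above; a sketch, with `container_ops` again an assumed configured instance:

    # Sketch: surface container properties via the `cls` hook; the operation
    # exposes its data only through response headers.
    async def fetch_container_properties(container_ops):
        def pick_headers(response, deserialized, headers):
            return headers  # ETag, lease state, public-access level, ...
        props = await container_ops.get_properties(cls=pick_headers)
        return props['ETag'], props['Last-Modified']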
-
-    async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """operation marks the specified container for deletion. The container and
-        any blobs contained within it are later deleted during garbage
-        collection.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        restype = "container"
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{containerName}'}
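The body above reads only the `if_modified_since` and `if_unmodified_since` attributes from `modified_access_conditions`, so a lightweight stand-in object is enough for a conditional delete; a sketch:

    from types import SimpleNamespace

    # Sketch: delete the container only if it has not changed since `cutoff`
    # (a datetime). SimpleNamespace stands in for the generated
    # ModifiedAccessConditions model; its two attributes are all the body reads.
    async def delete_if_unmodified(container_ops, cutoff):
        conditions = SimpleNamespace(if_modified_since=None,
                                     if_unmodified_since=cutoff)
        await container_ops.delete(modified_access_conditions=conditions)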
-
-    async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """operation sets one or more user-defined name-value pairs for the
-        specified container.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies user-defined name-value pairs
-         associated with the container. If no name-value pairs are specified,
-         the operation removes all existing metadata from the container. Note
-         that beginning with version 2009-09-19, metadata names must adhere to
-         the naming rules for C# identifiers. See Naming and Referencing
-         Containers, Blobs, and Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-
-        restype = "container"
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{containerName}'}
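Likewise, set_metadata reads only `lease_id` from its lease access conditions; a sketch of a lease-guarded metadata update, with `container_ops` again an assumed configured instance:

    from types import SimpleNamespace

    # Sketch: update container metadata while holding a lease. Only the
    # `lease_id` attribute is read from the access-conditions object.
    async def tag_container(container_ops, lease_id):
        await container_ops.set_metadata(
            metadata='project=demo',  # typed as str at this generated layer
            lease_access_conditions=SimpleNamespace(lease_id=lease_id),
        )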
-
-    async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """gets the permissions for the specified container. The permissions
-        indicate whether container data may be accessed publicly.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.blob.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        restype = "container"
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{containerName}'}
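Unlike most operations in this class, get_access_policy returns a deserialized value (a list of SignedIdentifier models) directly rather than only headers; a sketch, with the model attribute names assumed from the swagger definition:

    # Sketch: read the container ACL. `id` and `access_policy` are assumed
    # attribute names on the SignedIdentifier model.
    async def read_acl(container_ops):
        signed_identifiers = await container_ops.get_access_policy()
        for identifier in signed_identifiers or []:
            print(identifier.id, identifier.access_policy)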
-
-    async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """sets the permissions for the specified container. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param container_acl: The access control lists (ACLs) for the container.
-        :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param access: Specifies whether data in the container may be accessed
-         publicly and the level of access. Possible values include:
-         'container', 'blob'
-        :type access: str or ~azure.storage.blob.models.PublicAccessType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        restype = "container"
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        if access is not None:
-            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}}
-        if container_acl is not None:
-            body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{containerName}'}
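A sketch of setting the public-access level alone; with `container_acl` omitted the request is sent without a body, which Set Container ACL semantics treat as removing any stored access policies:

    # Sketch: allow anonymous read access to blobs (but not container
    # enumeration) by setting the x-ms-blob-public-access header to 'blob'.
    async def make_blobs_public(container_ops):
        await container_ops.set_access_policy(access='blob')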
-
-    async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{containerName}'}
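The new lease id is surfaced only in the x-ms-lease-id response header, so acquiring a lease and keeping its id again goes through the `cls` hook; a sketch:

    # Sketch: acquire an infinite lease (duration=-1) and return its id,
    # read from the x-ms-lease-id header via the `cls` callback.
    async def acquire_container_lease(container_ops):
        def lease_id_from(response, deserialized, headers):
            return headers['x-ms-lease-id']
        return await container_ops.acquire_lease(duration=-1, cls=lease_id_from)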
-
-    async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{containerName}'}
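Releasing is the simplest of the lease calls, needing only the id obtained at acquire time; a sketch:

    # Sketch: release a previously acquired container lease.
    async def release_container_lease(container_ops, lease_id):
        await container_ops.release_lease(lease_id)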
-
-    async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "renew"
-
-        # Construct URL
-        url = self.renew_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    renew_lease.metadata = {'url': '/{containerName}'}
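Since a lease's duration cannot be changed by renewing, a finite lease is typically kept alive by renewing it on a timer shorter than its duration; a sketch:

    import asyncio

    # Sketch: renew a finite lease every `interval` seconds to keep it alive;
    # cancel the task to let the lease lapse.
    async def keep_lease_alive(container_ops, lease_id, interval=30):
        while True:
            await container_ops.renew_lease(lease_id)
            await asyncio.sleep(interval)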
-
-    async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param break_period: For a break operation, proposed duration the
-         lease should continue before it is broken, in seconds, between 0 and
-         60. This break period is only used if it is shorter than the time
-         remaining on the lease. If longer, the time remaining on the lease is
-         used. A new lease will not be available before the break period has
-         expired, but the lease may be held for longer than the break period.
-         If this header does not appear with a break operation, a
-         fixed-duration lease breaks after the remaining lease period elapses,
-         and an infinite lease breaks immediately.
-        :type break_period: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{containerName}'}
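Breaking reports the remaining hold time in the x-ms-lease-time header; a sketch that breaks with a ten-second grace period and returns that value:

    # Sketch: break the current lease, allowing it to be held for up to ten
    # more seconds, and return the remaining time from x-ms-lease-time.
    async def break_container_lease(container_ops):
        def remaining(response, deserialized, headers):
            return headers['x-ms-lease-time']
        return await container_ops.break_lease(break_period=10, cls=remaining)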
-
-    async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{containerName}'}
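change_lease swaps the active id for a caller-proposed GUID in a single call; a sketch of rotating a lease id:

    import uuid

    # Sketch: rotate the lease id; the service replaces the current id with
    # the proposed GUID and echoes it back in x-ms-lease-id.
    async def rotate_lease(container_ops, current_lease_id):
        proposed = str(uuid.uuid4())
        await container_ops.change_lease(current_lease_id, proposed)
        return proposed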
-
-    async def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """[Update] The List Blobs operation returns a list of the blobs under the
-        specified container.
-
-        :param prefix: Filters the results to return only blobs whose
-         names begin with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of blobs to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all blobs remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of blobs to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.blob.models.ListBlobsIncludeItem]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListBlobsFlatSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "container"
-        comp = "list"
-
-        # Construct URL
-        url = self.list_blob_flat_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_blob_flat_segment.metadata = {'url': '/{containerName}'}
-
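For reference, the removed list_blob_flat_segment amounts to one authenticated GET against the container URL with restype=container and comp=list, an x-ms-version header, and an XML body that deserializes into ListBlobsFlatSegmentResponse. Below is a minimal aiohttp sketch of the same request; the account URL, container name, and SAS token are hypothetical placeholders, and the XML handling is deliberately simplistic.

import asyncio
import xml.etree.ElementTree as ET
from urllib.parse import parse_qsl

import aiohttp

ACCOUNT_URL = "https://myaccount.blob.core.windows.net"  # placeholder
CONTAINER = "mycontainer"                                # placeholder
SAS_TOKEN = "sv=2019-07-07&sig=..."                      # placeholder SAS

async def list_blobs_flat(prefix=None, maxresults=None):
    # The same query parameters the generated method serializes above.
    params = {"restype": "container", "comp": "list"}
    if prefix is not None:
        params["prefix"] = prefix
    if maxresults is not None:
        params["maxresults"] = str(maxresults)
    params.update(dict(parse_qsl(SAS_TOKEN)))  # SAS in lieu of shared-key auth
    headers = {"x-ms-version": "2019-07-07", "Accept": "application/xml"}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{ACCOUNT_URL}/{CONTAINER}",
                               params=params, headers=headers) as resp:
            body = await resp.text()
            if resp.status != 200:
                # The generated code maps anything but 200 to StorageErrorException.
                raise RuntimeError(f"HTTP {resp.status}: {body}")
            # EnumerationResults XML; collect just the blob names.
            return [b.findtext("Name") for b in ET.fromstring(body).iter("Blob")]

# asyncio.run(list_blobs_flat(prefix="logs/"))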
-    async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """[Update] The List Blobs operation returns a list of the blobs under the
-        specified container.
-
-        :param delimiter: When the request includes this parameter, the
-         operation returns a BlobPrefix element in the response body that acts
-         as a placeholder for all blobs whose names begin with the same
-         substring up to the appearance of the delimiter character. The
-         delimiter may be a single character or a string.
-        :type delimiter: str
-        :param prefix: Filters the results to return only blobs whose
-         names begin with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of blobs to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all blobs remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of blobs to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.blob.models.ListBlobsIncludeItem]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListBlobsHierarchySegmentResponse or the result of
-         cls(response)
-        :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "container"
-        comp = "list"
-
-        # Construct URL
-        url = self.list_blob_hierarchy_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
-
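The hierarchy variant differs only in the mandatory delimiter query parameter, and the response additionally carries BlobPrefix elements that stand in for the virtual directories. A sketch under the same assumptions (placeholder URL, container, and SAS; service version 2019-07-07):

import xml.etree.ElementTree as ET
from urllib.parse import parse_qsl

import aiohttp

async def list_blobs_hierarchy(account_url, container, sas, delimiter="/"):
    params = {"restype": "container", "comp": "list", "delimiter": delimiter}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07", "Accept": "application/xml"}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{account_url}/{container}",
                               params=params, headers=headers) as resp:
            resp.raise_for_status()
            root = ET.fromstring(await resp.text())
            # Virtual directories surface as BlobPrefix, blobs as Blob.
            prefixes = [p.findtext("Name") for p in root.iter("BlobPrefix")]
            blobs = [b.findtext("Name") for b in root.iter("Blob")]
            return prefixes, blobs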
-    async def get_account_info(self, *, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "account"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/{containerName}'}
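get_account_info is unusual in that the useful payload comes back entirely in response headers (x-ms-sku-name, x-ms-account-kind); there is no body to deserialize, which is why the method only hands cls the header dict. A hedged equivalent under the same placeholder assumptions:

from urllib.parse import parse_qsl

import aiohttp

async def get_account_info(account_url, container, sas):
    params = {"restype": "account", "comp": "properties"}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07"}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{account_url}/{container}",
                               params=params, headers=headers) as resp:
            resp.raise_for_status()
            # The interesting data is in the headers, not the body.
            return (resp.headers.get("x-ms-sku-name"),
                    resp.headers.get("x-ms-account-kind"))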
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,740 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class DirectoryOperations:
-    """DirectoryOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar resource: Constant value: "directory".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.resource = "directory"
-
-    async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Create a directory. By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts permission settings for the file
-         or directory, and is applied only when a default ACL does not exist in
-         the parent directory. If a umask bit is set, the corresponding
-         permission is disabled; otherwise the corresponding permission is
-         determined by the permission setting. A 4-digit octal notation (e.g.
-         0022) is supported here. If no umask is specified, a default umask of
-         0027 is used.
-        :type posix_umask: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}/{path}'}
-
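The directory operations in this file are the Data Lake Storage Gen2 surface that shipped inside the blob package: create is a PUT with resource=directory plus the x-ms-properties, x-ms-permissions, and x-ms-umask headers built above, and 201 is the only success code. A sketch, assuming a SAS-authorized filesystem; the dfs endpoint is an assumption here, since the generated client simply targets whatever URL it was configured with.

from urllib.parse import parse_qsl

import aiohttp

async def create_directory(dfs_url, filesystem, path, sas, umask="0027"):
    # dfs_url is assumed, e.g. "https://myaccount.dfs.core.windows.net"
    params = {"resource": "directory"}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07", "x-ms-umask": umask}
    async with aiohttp.ClientSession() as session:
        async with session.put(f"{dfs_url}/{filesystem}/{path}",
                               params=params, headers=headers) as resp:
            if resp.status != 201:
                # Mapped to DataLakeStorageErrorException in the generated code.
                raise RuntimeError(await resp.text())
            return resp.headers.get("ETag")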
-    async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """Rename a directory. By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests. For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param rename_source: The file or directory to be renamed. The value
-         must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved.
-        :type rename_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param marker: When renaming a directory, the number of paths that are
-         renamed with each invocation is limited.  If the number of paths to be
-         renamed exceeds this limit, a continuation token is returned in this
-         response header.  When a continuation token is returned in the
-         response, it must be specified in a subsequent invocation of the
-         rename operation to continue renaming the directory.
-        :type marker: str
-        :param path_rename_mode: Determines the behavior of the rename
-         operation. Possible values include: 'legacy', 'posix'
-        :type path_rename_mode: str or
-         ~azure.storage.blob.models.PathRenameMode
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts permission settings for the file
-         or directory, and is applied only when a default ACL does not exist in
-         the parent directory. If a umask bit is set, the corresponding
-         permission is disabled; otherwise the corresponding permission is
-         determined by the permission setting. A 4-digit octal notation (e.g.
-         0022) is supported here. If no umask is specified, a default umask of
-         0027 is used.
-        :type posix_umask: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.rename.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
-        if path_rename_mode is not None:
-            query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    rename.metadata = {'url': '/{filesystem}/{path}'}
-
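rename is driven by the x-ms-rename-source header rather than a request body, and renaming a large directory can take several round trips: each 201 response may carry x-ms-continuation, which must be sent back as the continuation query parameter. A sketch of that loop, under the same placeholder and auth assumptions:

from urllib.parse import parse_qsl

import aiohttp

async def rename_directory(dfs_url, filesystem, dest_path, source_path, sas):
    # source_path must be "/{filesystem}/{path}", per the docstring above.
    headers = {"x-ms-version": "2019-07-07", "x-ms-rename-source": source_path}
    continuation = None
    async with aiohttp.ClientSession() as session:
        while True:
            params = dict(parse_qsl(sas))
            if continuation is not None:
                params["continuation"] = continuation
            async with session.put(f"{dfs_url}/{filesystem}/{dest_path}",
                                   params=params, headers=headers) as resp:
                if resp.status != 201:
                    raise RuntimeError(await resp.text())
                continuation = resp.headers.get("x-ms-continuation")
            if not continuation:
                break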
-    async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Deletes the directory.
-
-        :param recursive_directory_delete: If "true", all paths beneath the
-         directory will be deleted. If "false" and the directory is non-empty,
-         an error occurs.
-        :type recursive_directory_delete: bool
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param marker: When deleting a directory, the number of paths that are
-         deleted with each invocation is limited.  If the number of paths to be
-         deleted exceeds this limit, a continuation token is returned in this
-         response header.  When a continuation token is returned in the
-         response, it must be specified in a subsequent invocation of the
-         delete operation to continue deleting the directory.
-        :type marker: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool')
-        if marker is not None:
-            query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}/{path}'}
-
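delete follows the same continuation pattern (the token round-trips through the continuation query parameter), with the recursive flag deciding whether deleting a non-empty directory succeeds or errors. A minimal single-shot sketch:

from urllib.parse import parse_qsl

import aiohttp

async def delete_directory(dfs_url, filesystem, path, sas, recursive=True):
    params = {"recursive": "true" if recursive else "false"}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07"}
    async with aiohttp.ClientSession() as session:
        async with session.delete(f"{dfs_url}/{filesystem}/{path}",
                                  params=params, headers=headers) as resp:
            if resp.status != 200:
                raise RuntimeError(await resp.text())
            # A large delete may return x-ms-continuation; resend it as the
            # continuation query parameter until it comes back empty.
            return resp.headers.get("x-ms-continuation")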
-    async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a
-        directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_acl: Sets POSIX access control rights on files and
-         directories. The value is a comma-separated list of access control
-         entries. Each access control entry (ACE) consists of a scope, a type,
-         a user or group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type posix_acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
-
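set_access_control is the lone PATCH in this file; action=setAccessControl selects the sub-operation and the ACL travels in the x-ms-acl header using the "[scope:][type]:[id]:[permissions]" format described in the docstring. A sketch with a hypothetical ACL value:

from urllib.parse import parse_qsl

import aiohttp

async def set_acl(dfs_url, filesystem, path, sas,
                  acl="user::rwx,group::r-x,other::---"):  # hypothetical ACL
    params = {"action": "setAccessControl"}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07", "x-ms-acl": acl}
    async with aiohttp.ClientSession() as session:
        async with session.patch(f"{dfs_url}/{filesystem}/{path}",
                                 params=params, headers=headers) as resp:
            if resp.status != 200:
                raise RuntimeError(await resp.text())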
-    async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Get the owner, group, permissions, or access control list for a
-        directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the identity values returned in
-         the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "getAccessControl"
-
-        # Construct URL
-        url = self.get_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    get_access_control.metadata = {'url': '/{filesystem}/{path}'}
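Its read counterpart is a HEAD request (action=getAccessControl), so owner, group, permissions, and the ACL all come back in response headers, optionally translated to User Principal Names with upn=true. A sketch:

from urllib.parse import parse_qsl

import aiohttp

async def get_acl(dfs_url, filesystem, path, sas, upn=False):
    params = {"action": "getAccessControl", "upn": "true" if upn else "false"}
    params.update(dict(parse_qsl(sas)))
    headers = {"x-ms-version": "2019-07-07"}
    async with aiohttp.ClientSession() as session:
        async with session.head(f"{dfs_url}/{filesystem}/{path}",
                                params=params, headers=headers) as resp:
            if resp.status != 200:
                # HEAD responses have no body; the status is all we get.
                raise RuntimeError(f"HEAD failed with HTTP {resp.status}")
            return {name: resp.headers.get(name) for name in
                    ("x-ms-owner", "x-ms-group", "x-ms-permissions", "x-ms-acl")}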
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_page_blob_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_page_blob_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_page_blob_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_page_blob_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1348 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class PageBlobOperations:
-    """PageBlobOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "PageBlob"
-
-    async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Create operation creates a new page blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param blob_content_length: This header specifies the maximum size for
-         the page blob, up to 1 TB. The page blob size must be aligned to a
-         512-byte boundary.
-        :type blob_content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param tier: Optional. Indicates the tier to be set on the page blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80'
-        :type tier: str or
-         ~azure.storage.blob.models.PremiumPageBlobAccessTier
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param blob_sequence_number: Set for page blobs only. The sequence
-         number is a user-controlled value that you can use to track requests.
-         The value of the sequence number must be between 0 and 2^63 - 1.
-        :type blob_sequence_number: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
-        if blob_sequence_number is not None:
-            header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}/{blob}'}
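# Hedged usage sketch for create: `ops` is assumed to be the
# PageBlobOperations instance a configured generated client attaches for you
# (credential and account-URL wiring are out of scope here). A page blob is
# created empty at a fixed, 512-byte-aligned size; content is written
# afterwards with upload_pages.
async def make_one_mib_page_blob(ops):
    await ops.create(
        content_length=0,                 # Create sends no body
        blob_content_length=1024 * 1024,  # must be 512-byte aligned
        blob_sequence_number=0,
    )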
-
-    async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Upload Pages operation writes a range of pages to a page blob.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: The range of pages to write, expressed in bytes and
-         aligned to 512-byte page boundaries.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "page"
-        page_write = "update"
-
-        # Construct URL
-        url = self.upload_pages.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_pages.metadata = {'url': '/{containerName}/{blob}'}
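# Hedged usage sketch for upload_pages, under the same `ops` assumption as
# above. The x-ms-range value must describe an inclusive, 512-byte-aligned
# span whose length equals Content-Length.
async def write_first_page(ops):
    data = b"\x00" * 512
    await ops.upload_pages(
        body=iter([data]),        # the generated layer streams the body
        content_length=len(data),
        range="bytes=0-511",      # inclusive end offset
    )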
-
-    async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Clear Pages operation clears a set of pages from a page blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: The range of pages to clear, expressed in bytes and
-         aligned to 512-byte page boundaries.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "page"
-        page_write = "clear"
-
-        # Construct URL
-        url = self.clear_pages.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    clear_pages.metadata = {'url': '/{containerName}/{blob}'}
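# Hedged usage sketch for clear_pages, same `ops` assumption. Clear sends no
# body, so Content-Length is 0 and the cleared span is named entirely by the
# x-ms-range header.
async def zero_first_page(ops):
    await ops.clear_pages(
        content_length=0,
        range="bytes=0-511",
    )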
-
-    async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Upload Pages operation writes a range of pages to a page blob where
-        the contents are read from a URL.
-
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param source_range: Bytes of source data in the specified range. The
-         length of this range must match the length of the destination range
-         given in the x-ms-range header.
-        :type source_range: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param range: The range of bytes to which the source range would be
-         written. The range must be 512-byte aligned, and the range end is
-         required.
-        :type range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        comp = "page"
-        page_write = "update"
-
-        # Construct URL
-        url = self.upload_pages_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'}
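# Hedged usage sketch for upload_pages_from_url, same `ops` assumption. The
# data is pulled server-side from source_url (typically a SAS URL), so the
# request itself carries no body; source and destination ranges must be the
# same length and 512-byte aligned.
async def copy_first_page_from(ops, source_url):
    await ops.upload_pages_from_url(
        source_url=source_url,
        source_range="bytes=0-511",
        content_length=0,          # no request body
        range="bytes=0-511",
    )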
-
-    async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Get Page Ranges operation returns the list of valid page ranges for
-        a page blob or snapshot of a page blob.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Return only the bytes of the blob in the specified
-         range.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PageList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.PageList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "pagelist"
-
-        # Construct URL
-        url = self.get_page_ranges.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PageList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}
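# Hedged usage sketch for get_page_ranges, same `ops` assumption. The
# deserialized PageList model exposes the valid spans as page_range.
async def print_valid_ranges(ops):
    page_list = await ops.get_page_ranges()
    for page in page_list.page_range or []:
        print(page.start, page.end)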
-
-    async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Get Page Ranges Diff operation returns the list of valid page
-        ranges for a page blob that were changed between target blob and
-        previous snapshot.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param prevsnapshot: Optional in version 2015-07-08 and newer. The
-         prevsnapshot parameter is a DateTime value that specifies that the
-         response will contain only pages that were changed between target blob
-         and previous snapshot. Changed pages include both updated and cleared
-         pages. The target blob may be a snapshot, as long as the snapshot
-         specified by prevsnapshot is the older of the two. Note that
-         incremental snapshots are currently supported only for blobs created
-         on or after January 1, 2016.
-        :type prevsnapshot: str
-        :param prev_snapshot_url: Optional. This header is only supported in
-         service versions 2019-04-19 and after and specifies the URL of a
-         previous snapshot of the target blob. The response will only contain
-         pages that were changed between the target blob and its previous
-         snapshot.
-        :type prev_snapshot_url: str
-        :param range: Return only the bytes of the blob in the specified
-         range.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PageList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.PageList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "pagelist"
-
-        # Construct URL
-        url = self.get_page_ranges_diff.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if prevsnapshot is not None:
-            query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if prev_snapshot_url is not None:
-            header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PageList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'}
-
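A minimal usage sketch for the diff operation above. `page_blob_ops` stands in for an already-constructed instance of this async operations class (its construction is out of scope here), and the `page_range`/`clear_range` attribute names follow the generated PageList model.

    async def changed_ranges(page_blob_ops, prev_snapshot):
        # Ask only for ranges that changed since `prev_snapshot`, limited to
        # the first MiB of the blob.
        page_list = await page_blob_ops.get_page_ranges_diff(
            prevsnapshot=prev_snapshot,
            range='bytes=0-1048575',
            timeout=30,
        )
        for page_range in (page_list.page_range or []):
            print('changed:', page_range.start, page_range.end)
        for clear_range in (page_list.clear_range or []):
            print('cleared:', clear_range.start, clear_range.end)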
-    async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Resize the Blob.
-
-        :param blob_content_length: This header specifies the maximum size for
-         the page blob, up to 1 TB. The page blob size must be aligned to a
-         512-byte boundary.
-        :type blob_content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.resize.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    resize.metadata = {'url': '/{containerName}/{blob}'}
-
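A sketch of calling resize and harvesting the response headers through the `cls` callback described in the docstring; `page_blob_ops` is again a hypothetical, pre-built instance of this operations class.

    async def grow_page_blob(page_blob_ops, new_size_bytes):
        # The service requires 512-byte alignment for page blob sizes.
        if new_size_bytes % 512:
            raise ValueError('size must be a multiple of 512 bytes')

        def keep_headers(response, _deserialized, headers):
            # Mirrors the cls(response, None, response_headers) call above.
            return headers

        headers = await page_blob_ops.resize(new_size_bytes, cls=keep_headers)
        return headers['ETag'], headers['x-ms-blob-sequence-number']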
-    async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Update the sequence number of the blob.
-
-        :param sequence_number_action: Required if the
-         x-ms-blob-sequence-number header is set for the request. This property
-         applies to page blobs only. This property indicates how the service
-         should modify the blob's sequence number. Possible values include:
-         'max', 'update', 'increment'
-        :type sequence_number_action: str or
-         ~azure.storage.blob.models.SequenceNumberActionType
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param blob_sequence_number: Set for page blobs only. The sequence
-         number is a user-controlled value that you can use to track requests.
-         The value of the sequence number must be between 0 and 2^63 - 1.
-        :type blob_sequence_number: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.update_sequence_number.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType')
-        if blob_sequence_number is not None:
-            header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}
-
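Two hedged one-liners built on the signature above. Note that the generated default for `blob_sequence_number` is 0, so to omit the x-ms-blob-sequence-number header (which must be absent for the 'increment' action) the caller has to pass None explicitly.

    async def bump_sequence_number(page_blob_ops):
        # 'increment' must not carry x-ms-blob-sequence-number; None skips it.
        await page_blob_ops.update_sequence_number(
            'increment', blob_sequence_number=None)

    async def set_sequence_number(page_blob_ops, value):
        # 'update' writes the supplied value (0 .. 2**63 - 1).
        await page_blob_ops.update_sequence_number(
            'update', blob_sequence_number=value)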
-    async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """The Copy Incremental operation copies a snapshot of the source page
-        blob to a destination page blob. The snapshot is copied such that only
-        the differential changes between the previously copied snapshot are
-        transferred to the destination. The copied snapshots are complete
-        copies of the original snapshot and can be read or copied from as
-        usual. This API is supported since REST version 2016-05-31.
-
-        :param copy_source: Specifies the name of the source page blob
-         snapshot. This value is a URL of up to 2 KB in length that specifies a
-         page blob snapshot. The value should be URL-encoded as it would appear
-         in a request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "incrementalcopy"
-
-        # Construct URL
-        url = self.copy_incremental.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    copy_incremental.metadata = {'url': '/{containerName}/{blob}'}
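A sketch that starts an incremental copy and pulls the copy id and status out of the 202 response headers via `cls`; the source URL and operations instance are placeholders.

    async def start_incremental_copy(page_blob_ops, source_snapshot_url):
        # source_snapshot_url must name a page-blob snapshot that the service
        # can read (public, or authorized with a SAS token).
        def copy_headers(response, _result, headers):
            return headers['x-ms-copy-id'], headers['x-ms-copy-status']

        copy_id, status = await page_blob_ops.copy_incremental(
            source_snapshot_url, cls=copy_headers)
        # status starts out 'pending'; poll blob properties until 'success'.
        return copy_id, status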
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,567 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ServiceOperations:
-    """ServiceOperations async operations.
-
-    You should not instantiate this class directly; instead, create a client
-    instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Sets properties for a storage account's Blob service endpoint,
-        including properties for Storage Analytics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.blob.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
-
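A minimal sketch, assuming `service_ops` is a pre-built instance of this class; model classes are reachable through its class-level `models` alias, and `default_service_version` is a field of the generated StorageServiceProperties model.

    async def pin_default_service_version(service_ops):
        props = service_ops.models.StorageServiceProperties(
            default_service_version='2019-07-07')
        # Success is a bare 202; response headers are only visible via cls.
        await service_ops.set_properties(props, timeout=30)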
-    async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """gets the properties of a storage account's Blob service, including
-        properties for Storage Analytics and CORS (Cross-Origin Resource
-        Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
-
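A usage sketch; the attribute names (`static_website` and its fields) follow the generated StorageServiceProperties model.

    async def show_static_website(service_ops):
        props = await service_ops.get_properties()
        site = props.static_website
        if site is not None and site.enabled:
            print('index document:', site.index_document)
            print('error document:', site.error_document404_path)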
-    async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Retrieves statistics related to replication for the Blob service. It is
-        only available on the secondary location endpoint when read-access
-        geo-redundant replication is enabled for the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceStats or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.StorageServiceStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceStats', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/'}
-
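A sketch; as the docstring notes, this call only succeeds against the secondary endpoint of an account with read-access geo-redundant replication.

    async def last_geo_sync(service_ops):
        stats = await service_ops.get_statistics()
        geo = stats.geo_replication  # generated GeoReplication model
        # status is 'live' once the secondary is consistent up to last_sync_time.
        return geo.status, geo.last_sync_time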
-    async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The List Containers Segment operation returns a list of the containers
-        under the specified account.
-
-        :param prefix: Filters the results to return only containers whose
-         name begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of containers to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all containers remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of containers to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify that the container's
-         metadata be returned as part of the response body. Possible values
-         include: 'metadata'
-        :type include: str or
-         ~azure.storage.blob.models.ListContainersIncludeType
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListContainersSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_containers_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListContainersSegmentResponse', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_containers_segment.metadata = {'url': '/'}
-
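A pagination sketch over the marker/NextMarker contract described above; `container_items` and `next_marker` are the generated response model's attribute names.

    async def all_container_names(service_ops, prefix=None):
        names, marker = [], None
        while True:
            page = await service_ops.list_containers_segment(
                prefix=prefix, marker=marker, maxresults=1000)
            names.extend(item.name for item in page.container_items)
            marker = page.next_marker
            if not marker:  # an empty NextMarker ends the listing
                return names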
-    async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Retrieves a user delegation key for the Blob service. This is only a
-        valid operation when using bearer token authentication.
-
-        :param key_info:
-        :type key_info: ~azure.storage.blob.models.KeyInfo
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: UserDelegationKey or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.UserDelegationKey
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "userdelegationkey"
-
-        # Construct URL
-        url = self.get_user_delegation_key.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(key_info, 'KeyInfo')
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('UserDelegationKey', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_user_delegation_key.metadata = {'url': '/'}
-
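A sketch; KeyInfo's `start`/`expiry` fields take ISO-8601 UTC strings, and the pipeline must be authenticating with an AAD bearer token for the call to be accepted.

    from datetime import datetime, timedelta, timezone

    async def hour_long_delegation_key(service_ops):
        now = datetime.now(timezone.utc)
        key_info = service_ops.models.KeyInfo(
            start=now.strftime('%Y-%m-%dT%H:%M:%SZ'),
            expiry=(now + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M:%SZ'))
        return await service_ops.get_user_delegation_key(key_info, timeout=30)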
-    async def get_account_info(self, *, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "account"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/'}
-
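Because the interesting data only travels in response headers and the method otherwise returns None, a `cls` callback is the way to get anything out of this call; a sketch:

    async def account_sku_and_kind(service_ops):
        def pick(response, _result, headers):
            return headers['x-ms-sku-name'], headers['x-ms-account-kind']
        return await service_ops.get_account_info(cls=pick)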
-    async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Batch operation allows multiple API calls to be embedded into a
-        single HTTP request.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param multipart_content_type: Required. The value of this header must
-         be multipart/mixed with a batch boundary. Example header value:
-         multipart/mixed; boundary=batch_<GUID>
-        :type multipart_content_type: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "batch"
-
-        # Construct URL
-        url = self.submit_batch.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            await response.load_body()
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    submit_batch.metadata = {'url': '/'}
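A sketch of draining the multipart response, assuming the azure-core async download stream returned by stream_download is asynchronously iterable. Assembling a valid multipart/mixed body (and its boundary) is out of scope here; the boundary below is a placeholder.

    async def send_batch(service_ops, body_bytes):
        boundary = 'batch_0000'                      # placeholder boundary
        stream = await service_ops.submit_batch(
            iter([body_bytes]),                      # generator-style body
            len(body_bytes),
            'multipart/mixed; boundary=' + boundary)
        async for chunk in stream:                   # raw multipart response
            print('received', len(chunk), 'bytes')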
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,192 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-try:
-    from ._models_py3 import AccessPolicy
-    from ._models_py3 import AppendPositionAccessConditions
-    from ._models_py3 import BlobFlatListSegment
-    from ._models_py3 import BlobHierarchyListSegment
-    from ._models_py3 import BlobHTTPHeaders
-    from ._models_py3 import BlobItem
-    from ._models_py3 import BlobMetadata
-    from ._models_py3 import BlobPrefix
-    from ._models_py3 import BlobProperties
-    from ._models_py3 import Block
-    from ._models_py3 import BlockList
-    from ._models_py3 import BlockLookupList
-    from ._models_py3 import ClearRange
-    from ._models_py3 import ContainerCpkScopeInfo
-    from ._models_py3 import ContainerItem
-    from ._models_py3 import ContainerProperties
-    from ._models_py3 import CorsRule
-    from ._models_py3 import CpkInfo
-    from ._models_py3 import CpkScopeInfo
-    from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException
-    from ._models_py3 import DataLakeStorageErrorError
-    from ._models_py3 import DirectoryHttpHeaders
-    from ._models_py3 import GeoReplication
-    from ._models_py3 import KeyInfo
-    from ._models_py3 import LeaseAccessConditions
-    from ._models_py3 import ListBlobsFlatSegmentResponse
-    from ._models_py3 import ListBlobsHierarchySegmentResponse
-    from ._models_py3 import ListContainersSegmentResponse
-    from ._models_py3 import Logging
-    from ._models_py3 import Metrics
-    from ._models_py3 import ModifiedAccessConditions
-    from ._models_py3 import PageList
-    from ._models_py3 import PageRange
-    from ._models_py3 import RetentionPolicy
-    from ._models_py3 import SequenceNumberAccessConditions
-    from ._models_py3 import SignedIdentifier
-    from ._models_py3 import SourceModifiedAccessConditions
-    from ._models_py3 import StaticWebsite
-    from ._models_py3 import StorageError, StorageErrorException
-    from ._models_py3 import StorageServiceProperties
-    from ._models_py3 import StorageServiceStats
-    from ._models_py3 import UserDelegationKey
-except (SyntaxError, ImportError):
-    from ._models import AccessPolicy
-    from ._models import AppendPositionAccessConditions
-    from ._models import BlobFlatListSegment
-    from ._models import BlobHierarchyListSegment
-    from ._models import BlobHTTPHeaders
-    from ._models import BlobItem
-    from ._models import BlobMetadata
-    from ._models import BlobPrefix
-    from ._models import BlobProperties
-    from ._models import Block
-    from ._models import BlockList
-    from ._models import BlockLookupList
-    from ._models import ClearRange
-    from ._models import ContainerCpkScopeInfo
-    from ._models import ContainerItem
-    from ._models import ContainerProperties
-    from ._models import CorsRule
-    from ._models import CpkInfo
-    from ._models import CpkScopeInfo
-    from ._models import DataLakeStorageError, DataLakeStorageErrorException
-    from ._models import DataLakeStorageErrorError
-    from ._models import DirectoryHttpHeaders
-    from ._models import GeoReplication
-    from ._models import KeyInfo
-    from ._models import LeaseAccessConditions
-    from ._models import ListBlobsFlatSegmentResponse
-    from ._models import ListBlobsHierarchySegmentResponse
-    from ._models import ListContainersSegmentResponse
-    from ._models import Logging
-    from ._models import Metrics
-    from ._models import ModifiedAccessConditions
-    from ._models import PageList
-    from ._models import PageRange
-    from ._models import RetentionPolicy
-    from ._models import SequenceNumberAccessConditions
-    from ._models import SignedIdentifier
-    from ._models import SourceModifiedAccessConditions
-    from ._models import StaticWebsite
-    from ._models import StorageError, StorageErrorException
-    from ._models import StorageServiceProperties
-    from ._models import StorageServiceStats
-    from ._models import UserDelegationKey
-from ._azure_blob_storage_enums import (
-    AccessTier,
-    AccessTierOptional,
-    AccessTierRequired,
-    AccountKind,
-    ArchiveStatus,
-    BlobType,
-    BlockListType,
-    CopyStatusType,
-    DeleteSnapshotsOptionType,
-    EncryptionAlgorithmType,
-    GeoReplicationStatusType,
-    LeaseDurationType,
-    LeaseStateType,
-    LeaseStatusType,
-    ListBlobsIncludeItem,
-    ListContainersIncludeType,
-    PathRenameMode,
-    PremiumPageBlobAccessTier,
-    PublicAccessType,
-    RehydratePriority,
-    SequenceNumberActionType,
-    SkuName,
-    StorageErrorCode,
-    SyncCopyStatusType,
-)
-
-__all__ = [
-    'AccessPolicy',
-    'AppendPositionAccessConditions',
-    'BlobFlatListSegment',
-    'BlobHierarchyListSegment',
-    'BlobHTTPHeaders',
-    'BlobItem',
-    'BlobMetadata',
-    'BlobPrefix',
-    'BlobProperties',
-    'Block',
-    'BlockList',
-    'BlockLookupList',
-    'ClearRange',
-    'ContainerCpkScopeInfo',
-    'ContainerItem',
-    'ContainerProperties',
-    'CorsRule',
-    'CpkInfo',
-    'CpkScopeInfo',
-    'DataLakeStorageError', 'DataLakeStorageErrorException',
-    'DataLakeStorageErrorError',
-    'DirectoryHttpHeaders',
-    'GeoReplication',
-    'KeyInfo',
-    'LeaseAccessConditions',
-    'ListBlobsFlatSegmentResponse',
-    'ListBlobsHierarchySegmentResponse',
-    'ListContainersSegmentResponse',
-    'Logging',
-    'Metrics',
-    'ModifiedAccessConditions',
-    'PageList',
-    'PageRange',
-    'RetentionPolicy',
-    'SequenceNumberAccessConditions',
-    'SignedIdentifier',
-    'SourceModifiedAccessConditions',
-    'StaticWebsite',
-    'StorageError', 'StorageErrorException',
-    'StorageServiceProperties',
-    'StorageServiceStats',
-    'UserDelegationKey',
-    'PublicAccessType',
-    'CopyStatusType',
-    'LeaseDurationType',
-    'LeaseStateType',
-    'LeaseStatusType',
-    'AccessTier',
-    'ArchiveStatus',
-    'BlobType',
-    'StorageErrorCode',
-    'GeoReplicationStatusType',
-    'AccessTierRequired',
-    'AccessTierOptional',
-    'PremiumPageBlobAccessTier',
-    'RehydratePriority',
-    'BlockListType',
-    'DeleteSnapshotsOptionType',
-    'EncryptionAlgorithmType',
-    'ListBlobsIncludeItem',
-    'ListContainersIncludeType',
-    'PathRenameMode',
-    'SequenceNumberActionType',
-    'SkuName',
-    'AccountKind',
-    'SyncCopyStatusType',
-]
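
The removed models __init__.py above follows autorest's dual-model convention
for Python 2/3 support: the annotated models in _models_py3.py cannot be
compiled under Python 2 (importing them raises SyntaxError), so the package
falls back to the untyped _models.py, which exposes the same class names. A
minimal sketch of the pattern, assuming the module layout shown in the diff:

    # Dual-model import fallback used by track1-era generated packages.
    try:
        # Python 3: models carrying type annotations.
        from ._models_py3 import BlobItem, ContainerItem
    except (SyntaxError, ImportError):
        # Python 2 cannot compile the annotated module; fall back to the
        # untyped equivalents.
        from ._models import BlobItem, ContainerItem

Both halves of the pattern are deleted together here, along with the enums
module they re-export.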
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_azure_blob_storage_enums.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_azure_blob_storage_enums.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_azure_blob_storage_enums.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_azure_blob_storage_enums.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,323 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-class PublicAccessType(str, Enum):
-
-    container = "container"
-    blob = "blob"
-
-
-class CopyStatusType(str, Enum):
-
-    pending = "pending"
-    success = "success"
-    aborted = "aborted"
-    failed = "failed"
-
-
-class LeaseDurationType(str, Enum):
-
-    infinite = "infinite"
-    fixed = "fixed"
-
-
-class LeaseStateType(str, Enum):
-
-    available = "available"
-    leased = "leased"
-    expired = "expired"
-    breaking = "breaking"
-    broken = "broken"
-
-
-class LeaseStatusType(str, Enum):
-
-    locked = "locked"
-    unlocked = "unlocked"
-
-
-class AccessTier(str, Enum):
-
-    p4 = "P4"
-    p6 = "P6"
-    p10 = "P10"
-    p15 = "P15"
-    p20 = "P20"
-    p30 = "P30"
-    p40 = "P40"
-    p50 = "P50"
-    p60 = "P60"
-    p70 = "P70"
-    p80 = "P80"
-    hot = "Hot"
-    cool = "Cool"
-    archive = "Archive"
-
-
-class ArchiveStatus(str, Enum):
-
-    rehydrate_pending_to_hot = "rehydrate-pending-to-hot"
-    rehydrate_pending_to_cool = "rehydrate-pending-to-cool"
-
-
-class BlobType(str, Enum):
-
-    block_blob = "BlockBlob"
-    page_blob = "PageBlob"
-    append_blob = "AppendBlob"
-
-
-class StorageErrorCode(str, Enum):
-
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-    append_position_condition_not_met = "AppendPositionConditionNotMet"
-    blob_already_exists = "BlobAlreadyExists"
-    blob_not_found = "BlobNotFound"
-    blob_overwritten = "BlobOverwritten"
-    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
-    block_count_exceeds_limit = "BlockCountExceedsLimit"
-    block_list_too_long = "BlockListTooLong"
-    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
-    cannot_verify_copy_source = "CannotVerifyCopySource"
-    container_already_exists = "ContainerAlreadyExists"
-    container_being_deleted = "ContainerBeingDeleted"
-    container_disabled = "ContainerDisabled"
-    container_not_found = "ContainerNotFound"
-    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
-    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
-    copy_id_mismatch = "CopyIdMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
-    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
-    incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
-    infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
-    invalid_blob_or_block = "InvalidBlobOrBlock"
-    invalid_blob_tier = "InvalidBlobTier"
-    invalid_blob_type = "InvalidBlobType"
-    invalid_block_id = "InvalidBlockId"
-    invalid_block_list = "InvalidBlockList"
-    invalid_operation = "InvalidOperation"
-    invalid_page_range = "InvalidPageRange"
-    invalid_source_blob_type = "InvalidSourceBlobType"
-    invalid_source_blob_url = "InvalidSourceBlobUrl"
-    invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
-    lease_already_present = "LeaseAlreadyPresent"
-    lease_already_broken = "LeaseAlreadyBroken"
-    lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
-    lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
-    lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
-    lease_id_missing = "LeaseIdMissing"
-    lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
-    lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
-    lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
-    lease_lost = "LeaseLost"
-    lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
-    lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
-    lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
-    max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
-    no_authentication_information = "NoAuthenticationInformation"
-    no_pending_copy_operation = "NoPendingCopyOperation"
-    operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
-    pending_copy_operation = "PendingCopyOperation"
-    previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
-    previous_snapshot_not_found = "PreviousSnapshotNotFound"
-    previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
-    sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
-    sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
-    snapshot_count_exceeded = "SnapshotCountExceeded"
-    snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
-    snapshots_present = "SnapshotsPresent"
-    source_condition_not_met = "SourceConditionNotMet"
-    system_in_use = "SystemInUse"
-    target_condition_not_met = "TargetConditionNotMet"
-    unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
-    blob_being_rehydrated = "BlobBeingRehydrated"
-    blob_archived = "BlobArchived"
-    blob_not_archived = "BlobNotArchived"
-    authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch"
-    authorization_protocol_mismatch = "AuthorizationProtocolMismatch"
-    authorization_permission_mismatch = "AuthorizationPermissionMismatch"
-    authorization_service_mismatch = "AuthorizationServiceMismatch"
-    authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch"
-
-
-class GeoReplicationStatusType(str, Enum):
-
-    live = "live"
-    bootstrap = "bootstrap"
-    unavailable = "unavailable"
-
-
-class AccessTierRequired(str, Enum):
-
-    p4 = "P4"
-    p6 = "P6"
-    p10 = "P10"
-    p15 = "P15"
-    p20 = "P20"
-    p30 = "P30"
-    p40 = "P40"
-    p50 = "P50"
-    p60 = "P60"
-    p70 = "P70"
-    p80 = "P80"
-    hot = "Hot"
-    cool = "Cool"
-    archive = "Archive"
-
-
-class AccessTierOptional(str, Enum):
-
-    p4 = "P4"
-    p6 = "P6"
-    p10 = "P10"
-    p15 = "P15"
-    p20 = "P20"
-    p30 = "P30"
-    p40 = "P40"
-    p50 = "P50"
-    p60 = "P60"
-    p70 = "P70"
-    p80 = "P80"
-    hot = "Hot"
-    cool = "Cool"
-    archive = "Archive"
-
-
-class PremiumPageBlobAccessTier(str, Enum):
-
-    p4 = "P4"
-    p6 = "P6"
-    p10 = "P10"
-    p15 = "P15"
-    p20 = "P20"
-    p30 = "P30"
-    p40 = "P40"
-    p50 = "P50"
-    p60 = "P60"
-    p70 = "P70"
-    p80 = "P80"
-
-
-class RehydratePriority(str, Enum):
-
-    high = "High"
-    standard = "Standard"
-
-
-class BlockListType(str, Enum):
-
-    committed = "committed"
-    uncommitted = "uncommitted"
-    all = "all"
-
-
-class DeleteSnapshotsOptionType(str, Enum):
-
-    include = "include"
-    only = "only"
-
-
-class EncryptionAlgorithmType(str, Enum):
-
-    aes256 = "AES256"
-
-
-class ListBlobsIncludeItem(str, Enum):
-
-    copy = "copy"
-    deleted = "deleted"
-    metadata = "metadata"
-    snapshots = "snapshots"
-    uncommittedblobs = "uncommittedblobs"
-
-
-class ListContainersIncludeType(str, Enum):
-
-    metadata = "metadata"
-
-
-class PathRenameMode(str, Enum):
-
-    legacy = "legacy"
-    posix = "posix"
-
-
-class SequenceNumberActionType(str, Enum):
-
-    max = "max"
-    update = "update"
-    increment = "increment"
-
-
-class SkuName(str, Enum):
-
-    standard_lrs = "Standard_LRS"
-    standard_grs = "Standard_GRS"
-    standard_ragrs = "Standard_RAGRS"
-    standard_zrs = "Standard_ZRS"
-    premium_lrs = "Premium_LRS"
-
-
-class AccountKind(str, Enum):
-
-    storage = "Storage"
-    blob_storage = "BlobStorage"
-    storage_v2 = "StorageV2"
-
-
-class SyncCopyStatusType(str, Enum):
-
-    success = "success"
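
Each enum deleted above subclasses both str and Enum, which is what lets the
generated operations accept either an enum member or its raw wire value: a
member compares equal to, and serializes as, its string value. A short
illustration of that behavior, trimmed to two members of AccessTier:

    from enum import Enum

    class AccessTier(str, Enum):
        hot = "Hot"
        cool = "Cool"

    # str mix-in: members compare equal to their wire values...
    assert AccessTier.hot == "Hot"
    # ...and the wire value looks the member back up.
    assert AccessTier("Cool") is AccessTier.cool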
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1581 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    :param start: the date-time the policy is active
-    :type start: str
-    :param expiry: the date-time the policy expires
-    :type expiry: str
-    :param permission: the permissions for the acl policy
-    :type permission: str
-    """
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.expiry = kwargs.get('expiry', None)
-        self.permission = kwargs.get('permission', None)
-
-
-class AppendPositionAccessConditions(Model):
-    """Additional parameters for a set of operations, such as:
-    AppendBlob_append_block, AppendBlob_append_block_from_url.
-
-    :param max_size: Optional conditional header. The max length in bytes
-     permitted for the append blob. If the Append Block operation would cause
-     the blob to exceed that limit or if the blob size is already greater than
-     the value specified in this header, the request will fail with the
-     MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition
-     Failed).
-    :type max_size: long
-    :param append_position: Optional conditional header, used only for the
-     Append Block operation. A number indicating the byte offset to compare.
-     Append Block will succeed only if the append position is equal to this
-     number. If it is not, the request will fail with the
-     AppendPositionConditionNotMet error (HTTP status code 412 - Precondition
-     Failed).
-    :type append_position: long
-    """
-
-    _attribute_map = {
-        'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}},
-        'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(AppendPositionAccessConditions, self).__init__(**kwargs)
-        self.max_size = kwargs.get('max_size', None)
-        self.append_position = kwargs.get('append_position', None)
-
-
-class BlobFlatListSegment(Model):
-    """BlobFlatListSegment.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param blob_items: Required.
-    :type blob_items: list[~azure.storage.blob.models.BlobItem]
-    """
-
-    _validation = {
-        'blob_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}},
-    }
-    _xml_map = {
-        'name': 'Blobs'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobFlatListSegment, self).__init__(**kwargs)
-        self.blob_items = kwargs.get('blob_items', None)
-
-
-class BlobHierarchyListSegment(Model):
-    """BlobHierarchyListSegment.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param blob_prefixes:
-    :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
-    :param blob_items: Required.
-    :type blob_items: list[~azure.storage.blob.models.BlobItem]
-    """
-
-    _validation = {
-        'blob_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}},
-        'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}},
-    }
-    _xml_map = {
-        'name': 'Blobs'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobHierarchyListSegment, self).__init__(**kwargs)
-        self.blob_prefixes = kwargs.get('blob_prefixes', None)
-        self.blob_items = kwargs.get('blob_items', None)
-
-
-class BlobHTTPHeaders(Model):
-    """Additional parameters for a set of operations.
-
-    :param blob_cache_control: Optional. Sets the blob's cache control. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type blob_cache_control: str
-    :param blob_content_type: Optional. Sets the blob's content type. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type blob_content_type: str
-    :param blob_content_md5: Optional. An MD5 hash of the blob content. Note
-     that this hash is not validated, as the hashes for the individual blocks
-     were validated when each was uploaded.
-    :type blob_content_md5: bytearray
-    :param blob_content_encoding: Optional. Sets the blob's content encoding.
-     If specified, this property is stored with the blob and returned with a
-     read request.
-    :type blob_content_encoding: str
-    :param blob_content_language: Optional. Sets the blob's content language.
-     If specified, this property is stored with the blob and returned with a
-     read request.
-    :type blob_content_language: str
-    :param blob_content_disposition: Optional. Sets the blob's
-     Content-Disposition header.
-    :type blob_content_disposition: str
-    """
-
-    _attribute_map = {
-        'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}},
-        'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}},
-        'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}},
-        'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}},
-        'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}},
-        'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobHTTPHeaders, self).__init__(**kwargs)
-        self.blob_cache_control = kwargs.get('blob_cache_control', None)
-        self.blob_content_type = kwargs.get('blob_content_type', None)
-        self.blob_content_md5 = kwargs.get('blob_content_md5', None)
-        self.blob_content_encoding = kwargs.get('blob_content_encoding', None)
-        self.blob_content_language = kwargs.get('blob_content_language', None)
-        self.blob_content_disposition = kwargs.get('blob_content_disposition', None)
-
-
-class BlobItem(Model):
-    """An Azure Storage blob.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param deleted: Required.
-    :type deleted: bool
-    :param snapshot: Required.
-    :type snapshot: str
-    :param properties: Required.
-    :type properties: ~azure.storage.blob.models.BlobProperties
-    :param metadata:
-    :type metadata: ~azure.storage.blob.models.BlobMetadata
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'deleted': {'required': True},
-        'snapshot': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
-        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
-        'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Blob'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.deleted = kwargs.get('deleted', None)
-        self.snapshot = kwargs.get('snapshot', None)
-        self.properties = kwargs.get('properties', None)
-        self.metadata = kwargs.get('metadata', None)
-
-
-class BlobMetadata(Model):
-    """BlobMetadata.
-
-    :param additional_properties: Unmatched properties from the message are
-     deserialized to this collection
-    :type additional_properties: dict[str, str]
-    :param encrypted:
-    :type encrypted: str
-    """
-
-    _attribute_map = {
-        'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
-        'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
-    }
-    _xml_map = {
-        'name': 'Metadata'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobMetadata, self).__init__(**kwargs)
-        self.additional_properties = kwargs.get('additional_properties', None)
-        self.encrypted = kwargs.get('encrypted', None)
-
-
-class BlobPrefix(Model):
-    """BlobPrefix.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobPrefix, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-
-
-class BlobProperties(Model):
-    """Properties of a blob.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param creation_time:
-    :type creation_time: datetime
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param content_length: Size in bytes
-    :type content_length: long
-    :param content_type:
-    :type content_type: str
-    :param content_encoding:
-    :type content_encoding: str
-    :param content_language:
-    :type content_language: str
-    :param content_md5:
-    :type content_md5: bytearray
-    :param content_disposition:
-    :type content_disposition: str
-    :param cache_control:
-    :type cache_control: str
-    :param blob_sequence_number:
-    :type blob_sequence_number: long
-    :param blob_type: Possible values include: 'BlockBlob', 'PageBlob',
-     'AppendBlob'
-    :type blob_type: str or ~azure.storage.blob.models.BlobType
-    :param lease_status: Possible values include: 'locked', 'unlocked'
-    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
-    :param lease_state: Possible values include: 'available', 'leased',
-     'expired', 'breaking', 'broken'
-    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
-    :param lease_duration: Possible values include: 'infinite', 'fixed'
-    :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
-    :param copy_id:
-    :type copy_id: str
-    :param copy_status: Possible values include: 'pending', 'success',
-     'aborted', 'failed'
-    :type copy_status: str or ~azure.storage.blob.models.CopyStatusType
-    :param copy_source:
-    :type copy_source: str
-    :param copy_progress:
-    :type copy_progress: str
-    :param copy_completion_time:
-    :type copy_completion_time: datetime
-    :param copy_status_description:
-    :type copy_status_description: str
-    :param server_encrypted:
-    :type server_encrypted: bool
-    :param incremental_copy:
-    :type incremental_copy: bool
-    :param destination_snapshot:
-    :type destination_snapshot: str
-    :param deleted_time:
-    :type deleted_time: datetime
-    :param remaining_retention_days:
-    :type remaining_retention_days: int
-    :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15',
-     'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-    :type access_tier: str or ~azure.storage.blob.models.AccessTier
-    :param access_tier_inferred:
-    :type access_tier_inferred: bool
-    :param archive_status: Possible values include:
-     'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool'
-    :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
-    :param customer_provided_key_sha256:
-    :type customer_provided_key_sha256: str
-    :param encryption_scope: The name of the encryption scope under which the
-     blob is encrypted.
-    :type encryption_scope: str
-    :param access_tier_change_time:
-    :type access_tier_change_time: datetime
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-    }
-
-    _attribute_map = {
-        'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}},
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
-        'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}},
-        'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}},
-        'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}},
-        'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}},
-        'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}},
-        'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}},
-        'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}},
-        'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}},
-        'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
-        'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
-        'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
-        'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}},
-        'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}},
-        'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}},
-        'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}},
-        'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}},
-        'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}},
-        'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}},
-        'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}},
-        'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}},
-        'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
-        'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
-        'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}},
-        'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}},
-        'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}},
-        'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}},
-        'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}},
-        'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}},
-    }
-    _xml_map = {
-        'name': 'Properties'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlobProperties, self).__init__(**kwargs)
-        self.creation_time = kwargs.get('creation_time', None)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.etag = kwargs.get('etag', None)
-        self.content_length = kwargs.get('content_length', None)
-        self.content_type = kwargs.get('content_type', None)
-        self.content_encoding = kwargs.get('content_encoding', None)
-        self.content_language = kwargs.get('content_language', None)
-        self.content_md5 = kwargs.get('content_md5', None)
-        self.content_disposition = kwargs.get('content_disposition', None)
-        self.cache_control = kwargs.get('cache_control', None)
-        self.blob_sequence_number = kwargs.get('blob_sequence_number', None)
-        self.blob_type = kwargs.get('blob_type', None)
-        self.lease_status = kwargs.get('lease_status', None)
-        self.lease_state = kwargs.get('lease_state', None)
-        self.lease_duration = kwargs.get('lease_duration', None)
-        self.copy_id = kwargs.get('copy_id', None)
-        self.copy_status = kwargs.get('copy_status', None)
-        self.copy_source = kwargs.get('copy_source', None)
-        self.copy_progress = kwargs.get('copy_progress', None)
-        self.copy_completion_time = kwargs.get('copy_completion_time', None)
-        self.copy_status_description = kwargs.get('copy_status_description', None)
-        self.server_encrypted = kwargs.get('server_encrypted', None)
-        self.incremental_copy = kwargs.get('incremental_copy', None)
-        self.destination_snapshot = kwargs.get('destination_snapshot', None)
-        self.deleted_time = kwargs.get('deleted_time', None)
-        self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
-        self.access_tier = kwargs.get('access_tier', None)
-        self.access_tier_inferred = kwargs.get('access_tier_inferred', None)
-        self.archive_status = kwargs.get('archive_status', None)
-        self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None)
-        self.encryption_scope = kwargs.get('encryption_scope', None)
-        self.access_tier_change_time = kwargs.get('access_tier_change_time', None)
-
-
-class Block(Model):
-    """Represents a single block in a block blob.  It describes the block's ID and
-    size.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required. The base64 encoded block ID.
-    :type name: str
-    :param size: Required. The block size in bytes.
-    :type size: int
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'size': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Block, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.size = kwargs.get('size', None)
-
-
-class BlockList(Model):
-    """BlockList.
-
-    :param committed_blocks:
-    :type committed_blocks: list[~azure.storage.blob.models.Block]
-    :param uncommitted_blocks:
-    :type uncommitted_blocks: list[~azure.storage.blob.models.Block]
-    """
-
-    _attribute_map = {
-        'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
-        'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(BlockList, self).__init__(**kwargs)
-        self.committed_blocks = kwargs.get('committed_blocks', None)
-        self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None)
-
-
-class BlockLookupList(Model):
-    """BlockLookupList.
-
-    :param committed:
-    :type committed: list[str]
-    :param uncommitted:
-    :type uncommitted: list[str]
-    :param latest:
-    :type latest: list[str]
-    """
-
-    _attribute_map = {
-        'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}},
-        'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}},
-        'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}},
-    }
-    _xml_map = {
-        'name': 'BlockList'
-    }
-
-    def __init__(self, **kwargs):
-        super(BlockLookupList, self).__init__(**kwargs)
-        self.committed = kwargs.get('committed', None)
-        self.uncommitted = kwargs.get('uncommitted', None)
-        self.latest = kwargs.get('latest', None)
-
-
-class ClearRange(Model):
-    """ClearRange.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required.
-    :type start: long
-    :param end: Required.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'ClearRange'
-    }
-
-    def __init__(self, **kwargs):
-        super(ClearRange, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.end = kwargs.get('end', None)
-
-
-class ContainerCpkScopeInfo(Model):
-    """Additional parameters for create operation.
-
-    :param default_encryption_scope: Optional.  Version 2019-07-07 and later.
-     Specifies the default encryption scope to set on the container and use for
-     all future writes.
-    :type default_encryption_scope: str
-    :param prevent_encryption_scope_override: Optional.  Version 2019-07-07
-     and newer.  If true, prevents any request from specifying a different
-     encryption scope than the scope set on the container.
-    :type prevent_encryption_scope_override: bool
-    """
-
-    _attribute_map = {
-        'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}},
-        'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(ContainerCpkScopeInfo, self).__init__(**kwargs)
-        self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
-        self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
-
-
-class ContainerItem(Model):
-    """An Azure Storage container.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param properties: Required.
-    :type properties: ~azure.storage.blob.models.ContainerProperties
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Container'
-    }
-
-    def __init__(self, **kwargs):
-        super(ContainerItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.properties = kwargs.get('properties', None)
-        self.metadata = kwargs.get('metadata', None)
-
-
-class ContainerProperties(Model):
-    """Properties of a container.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param lease_status: Possible values include: 'locked', 'unlocked'
-    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
-    :param lease_state: Possible values include: 'available', 'leased',
-     'expired', 'breaking', 'broken'
-    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
-    :param lease_duration: Possible values include: 'infinite', 'fixed'
-    :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
-    :param public_access: Possible values include: 'container', 'blob'
-    :type public_access: str or ~azure.storage.blob.models.PublicAccessType
-    :param has_immutability_policy:
-    :type has_immutability_policy: bool
-    :param has_legal_hold:
-    :type has_legal_hold: bool
-    :param default_encryption_scope:
-    :type default_encryption_scope: str
-    :param prevent_encryption_scope_override:
-    :type prevent_encryption_scope_override: bool
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-    }
-
-    _attribute_map = {
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
-        'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
-        'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
-        'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}},
-        'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}},
-        'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}},
-        'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}},
-        'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(ContainerProperties, self).__init__(**kwargs)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.etag = kwargs.get('etag', None)
-        self.lease_status = kwargs.get('lease_status', None)
-        self.lease_state = kwargs.get('lease_state', None)
-        self.lease_duration = kwargs.get('lease_duration', None)
-        self.public_access = kwargs.get('public_access', None)
-        self.has_immutability_policy = kwargs.get('has_immutability_policy', None)
-        self.has_legal_hold = kwargs.get('has_legal_hold', None)
-        self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
-        self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer.
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = kwargs.get('allowed_origins', None)
-        self.allowed_methods = kwargs.get('allowed_methods', None)
-        self.allowed_headers = kwargs.get('allowed_headers', None)
-        self.exposed_headers = kwargs.get('exposed_headers', None)
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
-
-
-class CpkInfo(Model):
-    """Additional parameters for a set of operations.
-
-    :param encryption_key: Optional. Specifies the encryption key to use to
-     encrypt the data provided in the request. If not specified, encryption is
-     performed with the root account encryption key.  For more information, see
-     Encryption at Rest for Azure Storage Services.
-    :type encryption_key: str
-    :param encryption_key_sha256: The SHA-256 hash of the provided encryption
-     key. Must be provided if the x-ms-encryption-key header is provided.
-    :type encryption_key_sha256: str
-    :param encryption_algorithm: The algorithm used to produce the encryption
-     key hash. Currently, the only accepted value is "AES256". Must be provided
-     if the x-ms-encryption-key header is provided. Possible values include:
-     'AES256'
-    :type encryption_algorithm: str or
-     ~azure.storage.blob.models.EncryptionAlgorithmType
-    """
-
-    _attribute_map = {
-        'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
-        'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
-        'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CpkInfo, self).__init__(**kwargs)
-        self.encryption_key = kwargs.get('encryption_key', None)
-        self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None)
-        self.encryption_algorithm = kwargs.get('encryption_algorithm', None)
-
-
-class CpkScopeInfo(Model):
-    """Additional parameters for a set of operations.
-
-    :param encryption_scope: Optional. Version 2019-07-07 and later.
-     Specifies the name of the encryption scope to use to encrypt the data
-     provided in the request. If not specified, encryption is performed with
-     the default account encryption scope.  For more information, see
-     Encryption at Rest for Azure Storage Services.
-    :type encryption_scope: str
-    """
-
-    _attribute_map = {
-        'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CpkScopeInfo, self).__init__(**kwargs)
-        self.encryption_scope = kwargs.get('encryption_scope', None)
-
-
-class DataLakeStorageError(Model):
-    """DataLakeStorageError.
-
-    :param error: The service error response object.
-    :type error: ~azure.storage.blob.models.DataLakeStorageErrorError
-    """
-
-    _attribute_map = {
-        'error': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(DataLakeStorageError, self).__init__(**kwargs)
-        self.error = kwargs.get('error', None)
-
-
-class DataLakeStorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'DataLakeStorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'DataLakeStorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(DataLakeStorageErrorException, self).__init__(response=response)
-
-
-class DataLakeStorageErrorError(Model):
-    """The service error response object.
-
-    :param code: The service error code.
-    :type code: str
-    :param message: The service error message.
-    :type message: str
-    """
-
-    _attribute_map = {
-        'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(DataLakeStorageErrorError, self).__init__(**kwargs)
-        self.code = kwargs.get('code', None)
-        self.message = kwargs.get('message', None)
-
-
-class DirectoryHttpHeaders(Model):
-    """Additional parameters for a set of operations, such as: Directory_create,
-    Directory_rename, Blob_rename.
-
-    :param cache_control: Cache control for given resource
-    :type cache_control: str
-    :param content_type: Content type for given resource
-    :type content_type: str
-    :param content_encoding: Content encoding for given resource
-    :type content_encoding: str
-    :param content_language: Content language for given resource
-    :type content_language: str
-    :param content_disposition: Content disposition for given resource
-    :type content_disposition: str
-    """
-
-    _attribute_map = {
-        'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}},
-        'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}},
-        'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}},
-        'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}},
-        'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(DirectoryHttpHeaders, self).__init__(**kwargs)
-        self.cache_control = kwargs.get('cache_control', None)
-        self.content_type = kwargs.get('content_type', None)
-        self.content_encoding = kwargs.get('content_encoding', None)
-        self.content_language = kwargs.get('content_language', None)
-        self.content_disposition = kwargs.get('content_disposition', None)
-
-
-class GeoReplication(Model):
-    """Geo-Replication information for the Secondary Storage Service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param status: Required. The status of the secondary location. Possible
-     values include: 'live', 'bootstrap', 'unavailable'
-    :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType
-    :param last_sync_time: Required. A GMT date/time value, to the second. All
-     primary writes preceding this value are guaranteed to be available for
-     read operations at the secondary. Primary writes after this point in time
-     may or may not be available for reads.
-    :type last_sync_time: datetime
-    """
-
-    _validation = {
-        'status': {'required': True},
-        'last_sync_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
-        'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(GeoReplication, self).__init__(**kwargs)
-        self.status = kwargs.get('status', None)
-        self.last_sync_time = kwargs.get('last_sync_time', None)
-
-
-class KeyInfo(Model):
-    """Key information.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. The date-time the key is active in ISO 8601 UTC
-     time
-    :type start: str
-    :param expiry: Required. The date-time the key expires in ISO 8601 UTC
-     time
-    :type expiry: str
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'expiry': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(KeyInfo, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.expiry = kwargs.get('expiry', None)
-
-
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = kwargs.get('lease_id', None)
-
-
-class ListBlobsFlatSegmentResponse(Model):
-    """An enumeration of blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param container_name: Required.
-    :type container_name: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param segment: Required.
-    :type segment: ~azure.storage.blob.models.BlobFlatListSegment
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_name': {'required': True},
-        'segment': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.container_name = kwargs.get('container_name', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.segment = kwargs.get('segment', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class ListBlobsHierarchySegmentResponse(Model):
-    """An enumeration of blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param container_name: Required.
-    :type container_name: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param delimiter:
-    :type delimiter: str
-    :param segment: Required.
-    :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_name': {'required': True},
-        'segment': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}},
-        'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.container_name = kwargs.get('container_name', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.delimiter = kwargs.get('delimiter', None)
-        self.segment = kwargs.get('segment', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class ListContainersSegmentResponse(Model):
-    """An enumeration of containers.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param container_items: Required.
-    :type container_items: list[~azure.storage.blob.models.ContainerItem]
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListContainersSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.container_items = kwargs.get('container_items', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
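For reference, all of these enumeration responses page the same way: pass the
previous response's NextMarker back as the marker on the next call until it
comes back empty. A minimal sketch, where `fetch_page` is a hypothetical
stand-in for the generated list operation:

def iter_all_containers(fetch_page):
    """Yield every ContainerItem across all result segments."""
    marker = None
    while True:
        page = fetch_page(marker=marker)   # -> ListContainersSegmentResponse
        for item in page.container_items or []:
            yield item
        if not page.next_marker:           # empty NextMarker means done
            return
        marker = page.next_marker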
-
-class Logging(Model):
-    """Azure Analytics Logging settings.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param delete: Required. Indicates whether all delete requests should be
-     logged.
-    :type delete: bool
-    :param read: Required. Indicates whether all read requests should be
-     logged.
-    :type read: bool
-    :param write: Required. Indicates whether all write requests should be
-     logged.
-    :type write: bool
-    :param retention_policy: Required.
-    :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'delete': {'required': True},
-        'read': {'required': True},
-        'write': {'required': True},
-        'retention_policy': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
-        'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
-        'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Logging, self).__init__(**kwargs)
-        self.version = kwargs.get('version', None)
-        self.delete = kwargs.get('delete', None)
-        self.read = kwargs.get('read', None)
-        self.write = kwargs.get('write', None)
-        self.retention_policy = kwargs.get('retention_policy', None)
-
-
-class Metrics(Model):
-    """a summary of request statistics grouped by API in hour or minute aggregates
-    for blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     Blob service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Metrics, self).__init__(**kwargs)
-        self.version = kwargs.get('version', None)
-        self.enabled = kwargs.get('enabled', None)
-        self.include_apis = kwargs.get('include_apis', None)
-        self.retention_policy = kwargs.get('retention_policy', None)
-
-
-class ModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param if_modified_since: Specify this header value to operate only on a
-     blob if it has been modified since the specified date/time.
-    :type if_modified_since: datetime
-    :param if_unmodified_since: Specify this header value to operate only on a
-     blob if it has not been modified since the specified date/time.
-    :type if_unmodified_since: datetime
-    :param if_match: Specify an ETag value to operate only on blobs with a
-     matching value.
-    :type if_match: str
-    :param if_none_match: Specify an ETag value to operate only on blobs
-     without a matching value.
-    :type if_none_match: str
-    """
-
-    _attribute_map = {
-        'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}},
-        'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}},
-        'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}},
-        'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(ModifiedAccessConditions, self).__init__(**kwargs)
-        self.if_modified_since = kwargs.get('if_modified_since', None)
-        self.if_unmodified_since = kwargs.get('if_unmodified_since', None)
-        self.if_match = kwargs.get('if_match', None)
-        self.if_none_match = kwargs.get('if_none_match', None)
-
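These four headers implement optimistic concurrency. A sketch of the usual
ETag round-trip; `client`, `get_properties`, and `set_blob_metadata` are
hypothetical stand-ins for the generated operations (import path as shipped
in 1.4.0):

from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    ModifiedAccessConditions)

def set_metadata_if_unchanged(client, metadata):
    props = client.get_properties()
    conditions = ModifiedAccessConditions(if_match=props.etag)
    # The service answers 412 Precondition Failed if the blob changed
    # after the ETag was read; the caller can then re-read and retry.
    return client.set_blob_metadata(metadata=metadata,
                                    modified_access_conditions=conditions)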
-
-class PageList(Model):
-    """the list of pages.
-
-    :param page_range:
-    :type page_range: list[~azure.storage.blob.models.PageRange]
-    :param clear_range:
-    :type clear_range: list[~azure.storage.blob.models.ClearRange]
-    """
-
-    _attribute_map = {
-        'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}},
-        'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(PageList, self).__init__(**kwargs)
-        self.page_range = kwargs.get('page_range', None)
-        self.clear_range = kwargs.get('clear_range', None)
-
-
-class PageRange(Model):
-    """PageRange.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required.
-    :type start: long
-    :param end: Required.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'PageRange'
-    }
-
-    def __init__(self, **kwargs):
-        super(PageRange, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.end = kwargs.get('end', None)
-
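Get Page Ranges responses interleave live and cleared ranges as two separate
lists; a small helper (plain Python, no service calls) to view them as one
ordered sequence:

def flatten_page_list(page_list):
    """Return sorted (start, end, is_cleared) tuples from a PageList."""
    spans = [(r.start, r.end, False) for r in (page_list.page_range or [])]
    spans += [(r.start, r.end, True) for r in (page_list.clear_range or [])]
    return sorted(spans)

# e.g. flatten_page_list(PageList(page_range=[PageRange(start=0, end=511)]))
# -> [(0, 511, False)]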
-
-class RetentionPolicy(Model):
-    """the retention policy which determines how long the associated data should
-    persist.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the storage service
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics or logging or
-     soft-deleted data should be retained. All data older than this value will
-     be deleted
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = kwargs.get('enabled', None)
-        self.days = kwargs.get('days', None)
-
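The _validation map above is enforced client-side by msrest before any
request is sent; a sketch of the failure mode (import path as shipped in
1.4.0, behavior assumed from msrest's default client-side validation):

from msrest import Serializer
from msrest.exceptions import ValidationError
from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    RetentionPolicy)

serializer = Serializer({'RetentionPolicy': RetentionPolicy})
try:
    # days=0 violates the {'minimum': 1} constraint declared above.
    serializer.body(RetentionPolicy(enabled=True, days=0),
                    'RetentionPolicy', is_xml=True)
except ValidationError as exc:
    print('rejected before any request was sent:', exc)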
-
-class SequenceNumberAccessConditions(Model):
-    """Additional parameters for a set of operations, such as:
-    PageBlob_upload_pages, PageBlob_clear_pages,
-    PageBlob_upload_pages_from_url.
-
-    :param if_sequence_number_less_than_or_equal_to: Specify this header value
-     to operate only on a blob if it has a sequence number less than or equal
-     to the specified value.
-    :type if_sequence_number_less_than_or_equal_to: long
-    :param if_sequence_number_less_than: Specify this header value to operate
-     only on a blob if it has a sequence number less than the specified value.
-    :type if_sequence_number_less_than: long
-    :param if_sequence_number_equal_to: Specify this header value to operate
-     only on a blob if it has the specified sequence number.
-    :type if_sequence_number_equal_to: long
-    """
-
-    _attribute_map = {
-        'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
-        'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
-        'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SequenceNumberAccessConditions, self).__init__(**kwargs)
-        self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None)
-        self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None)
-        self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None)
-
-
-class SignedIdentifier(Model):
-    """signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. A unique id
-    :type id: str
-    :param access_policy:
-    :type access_policy: ~azure.storage.blob.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-        'name': 'SignedIdentifier'
-    }
-
-    def __init__(self, **kwargs):
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = kwargs.get('id', None)
-        self.access_policy = kwargs.get('access_policy', None)
-
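SignedIdentifier pairs a stable ID with an AccessPolicy to form a stored
access policy, the unit that Set Container ACL serializes. A construction
sketch (ISO-8601 strings, since start/expiry are plain strings in this API
version; import path as shipped in 1.4.0):

from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    AccessPolicy, SignedIdentifier)

policy = AccessPolicy(start='2021-01-01T00:00:00Z',
                      expiry='2021-01-02T00:00:00Z',
                      permission='rl')          # read + list
acl = [SignedIdentifier(id='read-only-1d', access_policy=policy)]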
-
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param source_if_modified_since: Specify this header value to operate only
-     on a blob if it has been modified since the specified date/time.
-    :type source_if_modified_since: datetime
-    :param source_if_unmodified_since: Specify this header value to operate
-     only on a blob if it has not been modified since the specified date/time.
-    :type source_if_unmodified_since: datetime
-    :param source_if_match: Specify an ETag value to operate only on blobs
-     with a matching value.
-    :type source_if_match: str
-    :param source_if_none_match: Specify an ETag value to operate only on
-     blobs without a matching value.
-    :type source_if_none_match: str
-    """
-
-    _attribute_map = {
-        'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}},
-        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}},
-        'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}},
-        'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
-        self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
-        self.source_if_match = kwargs.get('source_if_match', None)
-        self.source_if_none_match = kwargs.get('source_if_none_match', None)
-
-
-class StaticWebsite(Model):
-    """The properties that enable an account to host a static website.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether this account is hosting a
-     static website
-    :type enabled: bool
-    :param index_document: The default name of the index page under each
-     directory
-    :type index_document: str
-    :param error_document404_path: The absolute path of the custom 404 page
-    :type error_document404_path: str
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
-        'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StaticWebsite, self).__init__(**kwargs)
-        self.enabled = kwargs.get('enabled', None)
-        self.index_document = kwargs.get('index_document', None)
-        self.error_document404_path = kwargs.get('error_document404_path', None)
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageError, self).__init__(**kwargs)
-        self.message = kwargs.get('message', None)
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage Service Properties.
-
-    :param logging:
-    :type logging: ~azure.storage.blob.models.Logging
-    :param hour_metrics:
-    :type hour_metrics: ~azure.storage.blob.models.Metrics
-    :param minute_metrics:
-    :type minute_metrics: ~azure.storage.blob.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.blob.models.CorsRule]
-    :param default_service_version: The default version to use for requests to
-     the Blob service if an incoming request's version is not specified.
-     Possible values include version 2008-10-27 and all more recent versions
-    :type default_service_version: str
-    :param delete_retention_policy:
-    :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    :param static_website:
-    :type static_website: ~azure.storage.blob.models.StaticWebsite
-    """
-
-    _attribute_map = {
-        'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-        'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
-        'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
-        'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.logging = kwargs.get('logging', None)
-        self.hour_metrics = kwargs.get('hour_metrics', None)
-        self.minute_metrics = kwargs.get('minute_metrics', None)
-        self.cors = kwargs.get('cors', None)
-        self.default_service_version = kwargs.get('default_service_version', None)
-        self.delete_retention_policy = kwargs.get('delete_retention_policy', None)
-        self.static_website = kwargs.get('static_website', None)
-
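How the pieces compose: Set Blob Service Properties sends this model as an
XML body, with the _attribute_map/_xml_map entries above steering msrest's
serializer. A sketch mirroring what the generated client does (module path
as shipped in 1.4.0; root element name assumed to default to the class name):

import xml.etree.ElementTree as ET
from msrest import Serializer
from azure.multiapi.storagev2.blob.v2019_07_07._generated import models

serializer = Serializer({name: cls for name, cls in vars(models).items()
                         if isinstance(cls, type)})
props = models.StorageServiceProperties(
    logging=models.Logging(
        version='1.0', delete=True, read=True, write=True,
        retention_policy=models.RetentionPolicy(enabled=True, days=7)),
    default_service_version='2019-07-07')
body = serializer.body(props, 'StorageServiceProperties', is_xml=True)
print(ET.tostring(body, encoding='unicode'))  # <StorageServiceProperties>...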
-
-class StorageServiceStats(Model):
-    """Stats for the storage service.
-
-    :param geo_replication:
-    :type geo_replication: ~azure.storage.blob.models.GeoReplication
-    """
-
-    _attribute_map = {
-        'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceStats, self).__init__(**kwargs)
-        self.geo_replication = kwargs.get('geo_replication', None)
-
-
-class UserDelegationKey(Model):
-    """A user delegation key.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param signed_oid: Required. The Azure Active Directory object ID in GUID
-     format.
-    :type signed_oid: str
-    :param signed_tid: Required. The Azure Active Directory tenant ID in GUID
-     format
-    :type signed_tid: str
-    :param signed_start: Required. The date-time the key is active
-    :type signed_start: datetime
-    :param signed_expiry: Required. The date-time the key expires
-    :type signed_expiry: datetime
-    :param signed_service: Required. Abbreviation of the Azure Storage service
-     that accepts the key
-    :type signed_service: str
-    :param signed_version: Required. The service version that created the key
-    :type signed_version: str
-    :param value: Required. The key as a base64 string
-    :type value: str
-    """
-
-    _validation = {
-        'signed_oid': {'required': True},
-        'signed_tid': {'required': True},
-        'signed_start': {'required': True},
-        'signed_expiry': {'required': True},
-        'signed_service': {'required': True},
-        'signed_version': {'required': True},
-        'value': {'required': True},
-    }
-
-    _attribute_map = {
-        'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}},
-        'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}},
-        'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}},
-        'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}},
-        'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}},
-        'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}},
-        'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(UserDelegationKey, self).__init__(**kwargs)
-        self.signed_oid = kwargs.get('signed_oid', None)
-        self.signed_tid = kwargs.get('signed_tid', None)
-        self.signed_start = kwargs.get('signed_start', None)
-        self.signed_expiry = kwargs.get('signed_expiry', None)
-        self.signed_service = kwargs.get('signed_service', None)
-        self.signed_version = kwargs.get('signed_version', None)
-        self.value = kwargs.get('value', None)
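
The delegation key's base64 `value` is what signs a user-delegation SAS: an
HMAC-SHA256 over the string-to-sign, with the signed_* fields echoed into the
token. A simplified signing sketch (the real string-to-sign layout is defined
by the Storage REST docs, not reproduced here):

import base64
import hashlib
import hmac

def sign_with_delegation_key(key, string_to_sign):
    """`key` is a UserDelegationKey; returns the base64 SAS signature."""
    digest = hmac.new(base64.b64decode(key.value),
                      string_to_sign.encode('utf-8'),
                      hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')
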
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models_py3.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models_py3.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models_py3.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/models/_models_py3.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1581 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    :param start: the date-time the policy is active
-    :type start: str
-    :param expiry: the date-time the policy expires
-    :type expiry: str
-    :param permission: the permissions for the acl policy
-    :type permission: str
-    """
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None:
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class AppendPositionAccessConditions(Model):
-    """Additional parameters for a set of operations, such as:
-    AppendBlob_append_block, AppendBlob_append_block_from_url.
-
-    :param max_size: Optional conditional header. The max length in bytes
-     permitted for the append blob. If the Append Block operation would cause
-     the blob to exceed that limit or if the blob size is already greater than
-     the value specified in this header, the request will fail with
-     MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition
-     Failed).
-    :type max_size: long
-    :param append_position: Optional conditional header, used only for the
-     Append Block operation. A number indicating the byte offset to compare.
-     Append Block will succeed only if the append position is equal to this
-     number. If it is not, the request will fail with the
-     AppendPositionConditionNotMet error (HTTP status code 412 - Precondition
-     Failed).
-    :type append_position: long
-    """
-
-    _attribute_map = {
-        'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}},
-        'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None:
-        super(AppendPositionAccessConditions, self).__init__(**kwargs)
-        self.max_size = max_size
-        self.append_position = append_position
-
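A sketch of how a writer uses append_position to serialize concurrent
appends; `client.append_block` is a hypothetical stand-in for the generated
operation (import path as shipped in 1.4.0):

from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    AppendPositionAccessConditions)

def append_at(client, data, expected_offset):
    conditions = AppendPositionAccessConditions(append_position=expected_offset)
    # Fails with 412 AppendPositionConditionNotMet if another writer got
    # there first, so the caller can re-read the blob length and retry.
    return client.append_block(
        data, len(data), append_position_access_conditions=conditions)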
-
-class BlobFlatListSegment(Model):
-    """BlobFlatListSegment.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param blob_items: Required.
-    :type blob_items: list[~azure.storage.blob.models.BlobItem]
-    """
-
-    _validation = {
-        'blob_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}},
-    }
-    _xml_map = {
-        'name': 'Blobs'
-    }
-
-    def __init__(self, *, blob_items, **kwargs) -> None:
-        super(BlobFlatListSegment, self).__init__(**kwargs)
-        self.blob_items = blob_items
-
-
-class BlobHierarchyListSegment(Model):
-    """BlobHierarchyListSegment.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param blob_prefixes:
-    :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
-    :param blob_items: Required.
-    :type blob_items: list[~azure.storage.blob.models.BlobItem]
-    """
-
-    _validation = {
-        'blob_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}},
-        'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}},
-    }
-    _xml_map = {
-        'name': 'Blobs'
-    }
-
-    def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None:
-        super(BlobHierarchyListSegment, self).__init__(**kwargs)
-        self.blob_prefixes = blob_prefixes
-        self.blob_items = blob_items
-
-
-class BlobHTTPHeaders(Model):
-    """Additional parameters for a set of operations.
-
-    :param blob_cache_control: Optional. Sets the blob's cache control. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type blob_cache_control: str
-    :param blob_content_type: Optional. Sets the blob's content type. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type blob_content_type: str
-    :param blob_content_md5: Optional. An MD5 hash of the blob content. Note
-     that this hash is not validated, as the hashes for the individual blocks
-     were validated when each was uploaded.
-    :type blob_content_md5: bytearray
-    :param blob_content_encoding: Optional. Sets the blob's content encoding.
-     If specified, this property is stored with the blob and returned with a
-     read request.
-    :type blob_content_encoding: str
-    :param blob_content_language: Optional. Set the blob's content language.
-     If specified, this property is stored with the blob and returned with a
-     read request.
-    :type blob_content_language: str
-    :param blob_content_disposition: Optional. Sets the blob's
-     Content-Disposition header.
-    :type blob_content_disposition: str
-    """
-
-    _attribute_map = {
-        'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}},
-        'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}},
-        'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}},
-        'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}},
-        'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}},
-        'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None:
-        super(BlobHTTPHeaders, self).__init__(**kwargs)
-        self.blob_cache_control = blob_cache_control
-        self.blob_content_type = blob_content_type
-        self.blob_content_md5 = blob_content_md5
-        self.blob_content_encoding = blob_content_encoding
-        self.blob_content_language = blob_content_language
-        self.blob_content_disposition = blob_content_disposition
-
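A typical header set for an upload. The MD5 is computed client-side purely so
the service stores and returns it with reads; as noted above it is not
validated (import path as shipped in 1.4.0):

import hashlib
from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    BlobHTTPHeaders)

data = b'<html>hello</html>'
headers = BlobHTTPHeaders(
    blob_content_type='text/html',
    blob_cache_control='max-age=3600',
    blob_content_md5=bytearray(hashlib.md5(data).digest()))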
-
-class BlobItem(Model):
-    """An Azure Storage blob.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param deleted: Required.
-    :type deleted: bool
-    :param snapshot: Required.
-    :type snapshot: str
-    :param properties: Required.
-    :type properties: ~azure.storage.blob.models.BlobProperties
-    :param metadata:
-    :type metadata: ~azure.storage.blob.models.BlobMetadata
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'deleted': {'required': True},
-        'snapshot': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
-        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
-        'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Blob'
-    }
-
-    def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, metadata=None, **kwargs) -> None:
-        super(BlobItem, self).__init__(**kwargs)
-        self.name = name
-        self.deleted = deleted
-        self.snapshot = snapshot
-        self.properties = properties
-        self.metadata = metadata
-
-
-class BlobMetadata(Model):
-    """BlobMetadata.
-
-    :param additional_properties: Unmatched properties from the message are
-     deserialized into this collection
-    :type additional_properties: dict[str, str]
-    :param encrypted:
-    :type encrypted: str
-    """
-
-    _attribute_map = {
-        'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
-        'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
-    }
-    _xml_map = {
-        'name': 'Metadata'
-    }
-
-    def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None:
-        super(BlobMetadata, self).__init__(**kwargs)
-        self.additional_properties = additional_properties
-        self.encrypted = encrypted
-
-
-class BlobPrefix(Model):
-    """BlobPrefix.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, name: str, **kwargs) -> None:
-        super(BlobPrefix, self).__init__(**kwargs)
-        self.name = name
-
-
-class BlobProperties(Model):
-    """Properties of a blob.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param creation_time:
-    :type creation_time: datetime
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param content_length: Size in bytes
-    :type content_length: long
-    :param content_type:
-    :type content_type: str
-    :param content_encoding:
-    :type content_encoding: str
-    :param content_language:
-    :type content_language: str
-    :param content_md5:
-    :type content_md5: bytearray
-    :param content_disposition:
-    :type content_disposition: str
-    :param cache_control:
-    :type cache_control: str
-    :param blob_sequence_number:
-    :type blob_sequence_number: long
-    :param blob_type: Possible values include: 'BlockBlob', 'PageBlob',
-     'AppendBlob'
-    :type blob_type: str or ~azure.storage.blob.models.BlobType
-    :param lease_status: Possible values include: 'locked', 'unlocked'
-    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
-    :param lease_state: Possible values include: 'available', 'leased',
-     'expired', 'breaking', 'broken'
-    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
-    :param lease_duration: Possible values include: 'infinite', 'fixed'
-    :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
-    :param copy_id:
-    :type copy_id: str
-    :param copy_status: Possible values include: 'pending', 'success',
-     'aborted', 'failed'
-    :type copy_status: str or ~azure.storage.blob.models.CopyStatusType
-    :param copy_source:
-    :type copy_source: str
-    :param copy_progress:
-    :type copy_progress: str
-    :param copy_completion_time:
-    :type copy_completion_time: datetime
-    :param copy_status_description:
-    :type copy_status_description: str
-    :param server_encrypted:
-    :type server_encrypted: bool
-    :param incremental_copy:
-    :type incremental_copy: bool
-    :param destination_snapshot:
-    :type destination_snapshot: str
-    :param deleted_time:
-    :type deleted_time: datetime
-    :param remaining_retention_days:
-    :type remaining_retention_days: int
-    :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15',
-     'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-    :type access_tier: str or ~azure.storage.blob.models.AccessTier
-    :param access_tier_inferred:
-    :type access_tier_inferred: bool
-    :param archive_status: Possible values include:
-     'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool'
-    :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
-    :param customer_provided_key_sha256:
-    :type customer_provided_key_sha256: str
-    :param encryption_scope: The name of the encryption scope under which the
-     blob is encrypted.
-    :type encryption_scope: str
-    :param access_tier_change_time:
-    :type access_tier_change_time: datetime
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-    }
-
-    _attribute_map = {
-        'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}},
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
-        'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}},
-        'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}},
-        'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}},
-        'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}},
-        'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}},
-        'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}},
-        'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}},
-        'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}},
-        'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
-        'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
-        'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
-        'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}},
-        'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}},
-        'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}},
-        'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}},
-        'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}},
-        'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}},
-        'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}},
-        'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}},
-        'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}},
-        'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
-        'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
-        'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}},
-        'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}},
-        'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}},
-        'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}},
-        'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}},
-        'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}},
-    }
-    _xml_map = {
-        'name': 'Properties'
-    }
-
-    def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, **kwargs) -> None:
-        super(BlobProperties, self).__init__(**kwargs)
-        self.creation_time = creation_time
-        self.last_modified = last_modified
-        self.etag = etag
-        self.content_length = content_length
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_md5 = content_md5
-        self.content_disposition = content_disposition
-        self.cache_control = cache_control
-        self.blob_sequence_number = blob_sequence_number
-        self.blob_type = blob_type
-        self.lease_status = lease_status
-        self.lease_state = lease_state
-        self.lease_duration = lease_duration
-        self.copy_id = copy_id
-        self.copy_status = copy_status
-        self.copy_source = copy_source
-        self.copy_progress = copy_progress
-        self.copy_completion_time = copy_completion_time
-        self.copy_status_description = copy_status_description
-        self.server_encrypted = server_encrypted
-        self.incremental_copy = incremental_copy
-        self.destination_snapshot = destination_snapshot
-        self.deleted_time = deleted_time
-        self.remaining_retention_days = remaining_retention_days
-        self.access_tier = access_tier
-        self.access_tier_inferred = access_tier_inferred
-        self.archive_status = archive_status
-        self.customer_provided_key_sha256 = customer_provided_key_sha256
-        self.encryption_scope = encryption_scope
-        self.access_tier_change_time = access_tier_change_time
-
-
-class Block(Model):
-    """Represents a single block in a block blob.  It describes the block's ID and
-    size.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required. The base64 encoded block ID.
-    :type name: str
-    :param size: Required. The block size in bytes.
-    :type size: int
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'size': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, name: str, size: int, **kwargs) -> None:
-        super(Block, self).__init__(**kwargs)
-        self.name = name
-        self.size = size
-
-
-class BlockList(Model):
-    """BlockList.
-
-    :param committed_blocks:
-    :type committed_blocks: list[~azure.storage.blob.models.Block]
-    :param uncommitted_blocks:
-    :type uncommitted_blocks: list[~azure.storage.blob.models.Block]
-    """
-
-    _attribute_map = {
-        'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
-        'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None:
-        super(BlockList, self).__init__(**kwargs)
-        self.committed_blocks = committed_blocks
-        self.uncommitted_blocks = uncommitted_blocks
-
-
-class BlockLookupList(Model):
-    """BlockLookupList.
-
-    :param committed:
-    :type committed: list[str]
-    :param uncommitted:
-    :type uncommitted: list[str]
-    :param latest:
-    :type latest: list[str]
-    """
-
-    _attribute_map = {
-        'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}},
-        'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}},
-        'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}},
-    }
-    _xml_map = {
-        'name': 'BlockList'
-    }
-
-    def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None:
-        super(BlockLookupList, self).__init__(**kwargs)
-        self.committed = committed
-        self.uncommitted = uncommitted
-        self.latest = latest
-
-
-class ClearRange(Model):
-    """ClearRange.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required.
-    :type start: long
-    :param end: Required.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'ClearRange'
-    }
-
-    def __init__(self, *, start: int, end: int, **kwargs) -> None:
-        super(ClearRange, self).__init__(**kwargs)
-        self.start = start
-        self.end = end
-
-
-class ContainerCpkScopeInfo(Model):
-    """Additional parameters for create operation.
-
-    :param default_encryption_scope: Optional.  Version 2019-07-07 and later.
-     Specifies the default encryption scope to set on the container and use for
-     all future writes.
-    :type default_encryption_scope: str
-    :param prevent_encryption_scope_override: Optional.  Version 2019-07-07
-     and newer.  If true, prevents any request from specifying a different
-     encryption scope than the scope set on the container.
-    :type prevent_encryption_scope_override: bool
-    """
-
-    _attribute_map = {
-        'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}},
-        'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None:
-        super(ContainerCpkScopeInfo, self).__init__(**kwargs)
-        self.default_encryption_scope = default_encryption_scope
-        self.prevent_encryption_scope_override = prevent_encryption_scope_override
-
-
-class ContainerItem(Model):
-    """An Azure Storage container.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param properties: Required.
-    :type properties: ~azure.storage.blob.models.ContainerProperties
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Container'
-    }
-
-    def __init__(self, *, name: str, properties, metadata=None, **kwargs) -> None:
-        super(ContainerItem, self).__init__(**kwargs)
-        self.name = name
-        self.properties = properties
-        self.metadata = metadata
-
-
-class ContainerProperties(Model):
-    """Properties of a container.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param lease_status: Possible values include: 'locked', 'unlocked'
-    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
-    :param lease_state: Possible values include: 'available', 'leased',
-     'expired', 'breaking', 'broken'
-    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
-    :param lease_duration: Possible values include: 'infinite', 'fixed'
-    :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
-    :param public_access: Possible values include: 'container', 'blob'
-    :type public_access: str or ~azure.storage.blob.models.PublicAccessType
-    :param has_immutability_policy:
-    :type has_immutability_policy: bool
-    :param has_legal_hold:
-    :type has_legal_hold: bool
-    :param default_encryption_scope:
-    :type default_encryption_scope: str
-    :param prevent_encryption_scope_override:
-    :type prevent_encryption_scope_override: bool
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-    }
-
-    _attribute_map = {
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
-        'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
-        'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
-        'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}},
-        'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}},
-        'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}},
-        'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}},
-        'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None:
-        super(ContainerProperties, self).__init__(**kwargs)
-        self.last_modified = last_modified
-        self.etag = etag
-        self.lease_status = lease_status
-        self.lease_state = lease_state
-        self.lease_duration = lease_duration
-        self.public_access = public_access
-        self.has_immutability_policy = has_immutability_policy
-        self.has_legal_hold = has_legal_hold
-        self.default_encryption_scope = default_encryption_scope
-        self.prevent_encryption_scope_override = prevent_encryption_scope_override
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. the request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = allowed_origins
-        self.allowed_methods = allowed_methods
-        self.allowed_headers = allowed_headers
-        self.exposed_headers = exposed_headers
-        self.max_age_in_seconds = max_age_in_seconds
-
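All five fields are required, so even a minimal rule spells each out. A
construction sketch (import path as shipped in 1.4.0):

from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import (
    CorsRule)

rule = CorsRule(allowed_origins='https://example.com',
                allowed_methods='GET,PUT',      # comma-separated verbs
                allowed_headers='x-ms-meta-*',
                exposed_headers='x-ms-meta-*',
                max_age_in_seconds=3600)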
-
-class CpkInfo(Model):
-    """Additional parameters for a set of operations.
-
-    :param encryption_key: Optional. Specifies the encryption key to use to
-     encrypt the data provided in the request. If not specified, encryption is
-     performed with the root account encryption key.  For more information, see
-     Encryption at Rest for Azure Storage Services.
-    :type encryption_key: str
-    :param encryption_key_sha256: The SHA-256 hash of the provided encryption
-     key. Must be provided if the x-ms-encryption-key header is provided.
-    :type encryption_key_sha256: str
-    :param encryption_algorithm: The algorithm used to produce the encryption
-     key hash. Currently, the only accepted value is "AES256". Must be provided
-     if the x-ms-encryption-key header is provided. Possible values include:
-     'AES256'
-    :type encryption_algorithm: str or
-     ~azure.storage.blob.models.EncryptionAlgorithmType
-    """
-
-    _attribute_map = {
-        'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
-        'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
-        'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None:
-        super(CpkInfo, self).__init__(**kwargs)
-        self.encryption_key = encryption_key
-        self.encryption_key_sha256 = encryption_key_sha256
-        self.encryption_algorithm = encryption_algorithm
-
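Deriving the three values the service expects for a customer-provided key: a
256-bit key and its SHA-256, both base64-encoded, plus the fixed AES256
algorithm name (import path as shipped in 1.4.0):

import base64
import hashlib
import os
from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import CpkInfo

raw_key = os.urandom(32)                      # AES-256 key material
cpk = CpkInfo(
    encryption_key=base64.b64encode(raw_key).decode(),
    encryption_key_sha256=base64.b64encode(
        hashlib.sha256(raw_key).digest()).decode(),
    encryption_algorithm='AES256')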
-
-class CpkScopeInfo(Model):
-    """Additional parameters for a set of operations.
-
-    :param encryption_scope: Optional. Version 2019-07-07 and later.
-     Specifies the name of the encryption scope to use to encrypt the data
-     provided in the request. If not specified, encryption is performed with
-     the default account encryption scope.  For more information, see
-     Encryption at Rest for Azure Storage Services.
-    :type encryption_scope: str
-    """
-
-    _attribute_map = {
-        'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, encryption_scope: str=None, **kwargs) -> None:
-        super(CpkScopeInfo, self).__init__(**kwargs)
-        self.encryption_scope = encryption_scope
-
-
-class DataLakeStorageError(Model):
-    """DataLakeStorageError.
-
-    :param error: The service error response object.
-    :type error: ~azure.storage.blob.models.DataLakeStorageErrorError
-    """
-
-    _attribute_map = {
-        'error': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, error=None, **kwargs) -> None:
-        super(DataLakeStorageError, self).__init__(**kwargs)
-        self.error = error
-
-
-class DataLakeStorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'DataLakeStorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'DataLakeStorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(DataLakeStorageErrorException, self).__init__(response=response)
-
-
-class DataLakeStorageErrorError(Model):
-    """The service error response object.
-
-    :param code: The service error code.
-    :type code: str
-    :param message: The service error message.
-    :type message: str
-    """
-
-    _attribute_map = {
-        'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None:
-        super(DataLakeStorageErrorError, self).__init__(**kwargs)
-        self.code = code
-        self.message = message
-
-
-class DirectoryHttpHeaders(Model):
-    """Additional parameters for a set of operations, such as: Directory_create,
-    Directory_rename, Blob_rename.
-
-    :param cache_control: Cache control for the given resource
-    :type cache_control: str
-    :param content_type: Content type for the given resource
-    :type content_type: str
-    :param content_encoding: Content encoding for the given resource
-    :type content_encoding: str
-    :param content_language: Content language for the given resource
-    :type content_language: str
-    :param content_disposition: Content disposition for the given resource
-    :type content_disposition: str
-    """
-
-    _attribute_map = {
-        'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}},
-        'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}},
-        'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}},
-        'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}},
-        'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None:
-        super(DirectoryHttpHeaders, self).__init__(**kwargs)
-        self.cache_control = cache_control
-        self.content_type = content_type
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-
-
-class GeoReplication(Model):
-    """Geo-Replication information for the Secondary Storage Service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param status: Required. The status of the secondary location. Possible
-     values include: 'live', 'bootstrap', 'unavailable'
-    :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType
-    :param last_sync_time: Required. A GMT date/time value, to the second. All
-     primary writes preceding this value are guaranteed to be available for
-     read operations at the secondary. Primary writes after this point in time
-     may or may not be available for reads.
-    :type last_sync_time: datetime
-    """
-
-    _validation = {
-        'status': {'required': True},
-        'last_sync_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
-        'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, status, last_sync_time, **kwargs) -> None:
-        super(GeoReplication, self).__init__(**kwargs)
-        self.status = status
-        self.last_sync_time = last_sync_time
-
-
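last_sync_time bounds the secondary's staleness: primary writes before it are readable at the secondary, later ones may not be. A small sketch computing the worst-case lag, assuming last_sync_time deserializes to a timezone-aware datetime (as the rfc-1123 mapping above implies):

from datetime import datetime, timezone

def replication_lag_seconds(geo_replication):
    # Writes after last_sync_time may not yet be readable at the secondary,
    # so this difference is the worst-case staleness window.
    return (datetime.now(timezone.utc) - geo_replication.last_sync_time).total_seconds()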
-class KeyInfo(Model):
-    """Key information.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. The date-time the key is active in ISO 8601 UTC
-     time
-    :type start: str
-    :param expiry: Required. The date-time the key expires in ISO 8601 UTC
-     time
-    :type expiry: str
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'expiry': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, start: str, expiry: str, **kwargs) -> None:
-        super(KeyInfo, self).__init__(**kwargs)
-        self.start = start
-        self.expiry = expiry
-
-
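KeyInfo takes its boundaries as ISO 8601 UTC strings rather than datetimes. A sketch building a one-hour validity window with the KeyInfo class above; the formatting helper is illustrative:

from datetime import datetime, timedelta, timezone

def to_iso8601(dt):
    # e.g. '2019-07-07T00:00:00Z' -- second precision, UTC designator
    return dt.astimezone(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

now = datetime.now(timezone.utc)
key_info = KeyInfo(start=to_iso8601(now), expiry=to_iso8601(now + timedelta(hours=1)))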
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, lease_id: str=None, **kwargs) -> None:
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = lease_id
-
-
-class ListBlobsFlatSegmentResponse(Model):
-    """An enumeration of blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param container_name: Required.
-    :type container_name: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param segment: Required.
-    :type segment: ~azure.storage.blob.models.BlobFlatListSegment
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_name': {'required': True},
-        'segment': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None:
-        super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.container_name = container_name
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.segment = segment
-        self.next_marker = next_marker
-
-
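The listing is segmented: a non-empty next_marker is fed back as the marker of the following request. A drain loop, assuming a callable that performs one segment request and that the segment exposes blob_items (BlobFlatListSegment is defined elsewhere in this module, so that attribute name is an assumption):

def iter_blobs(list_segment):
    # list_segment is any callable taking marker=... and returning a
    # ListBlobsFlatSegmentResponse; hypothetical, for illustration only.
    marker = None
    while True:
        page = list_segment(marker=marker)
        for item in page.segment.blob_items:
            yield item
        if not page.next_marker:  # empty NextMarker ends the enumeration
            return
        marker = page.next_marker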
-class ListBlobsHierarchySegmentResponse(Model):
-    """An enumeration of blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param container_name: Required.
-    :type container_name: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param delimiter:
-    :type delimiter: str
-    :param segment: Required.
-    :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_name': {'required': True},
-        'segment': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}},
-        'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None:
-        super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.container_name = container_name
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.delimiter = delimiter
-        self.segment = segment
-        self.next_marker = next_marker
-
-
-class ListContainersSegmentResponse(Model):
-    """An enumeration of containers.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param container_items: Required.
-    :type container_items: list[~azure.storage.blob.models.ContainerItem]
-    :param next_marker:
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'container_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None:
-        super(ListContainersSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.container_items = container_items
-        self.next_marker = next_marker
-
-
-class Logging(Model):
-    """Azure Analytics Logging settings.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param delete: Required. Indicates whether all delete requests should be
-     logged.
-    :type delete: bool
-    :param read: Required. Indicates whether all read requests should be
-     logged.
-    :type read: bool
-    :param write: Required. Indicates whether all write requests should be
-     logged.
-    :type write: bool
-    :param retention_policy: Required.
-    :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'delete': {'required': True},
-        'read': {'required': True},
-        'write': {'required': True},
-        'retention_policy': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
-        'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
-        'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None:
-        super(Logging, self).__init__(**kwargs)
-        self.version = version
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy
-
-
-class Metrics(Model):
-    """a summary of request statistics grouped by API in hour or minute aggregates
-    for blobs.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     Blob service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
-        super(Metrics, self).__init__(**kwargs)
-        self.version = version
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy
-
-
-class ModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param if_modified_since: Specify this header value to operate only on a
-     blob if it has been modified since the specified date/time.
-    :type if_modified_since: datetime
-    :param if_unmodified_since: Specify this header value to operate only on a
-     blob if it has not been modified since the specified date/time.
-    :type if_unmodified_since: datetime
-    :param if_match: Specify an ETag value to operate only on blobs with a
-     matching value.
-    :type if_match: str
-    :param if_none_match: Specify an ETag value to operate only on blobs
-     without a matching value.
-    :type if_none_match: str
-    """
-
-    _attribute_map = {
-        'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}},
-        'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}},
-        'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}},
-        'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None:
-        super(ModifiedAccessConditions, self).__init__(**kwargs)
-        self.if_modified_since = if_modified_since
-        self.if_unmodified_since = if_unmodified_since
-        self.if_match = if_match
-        self.if_none_match = if_none_match
-
-
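if_match enables optimistic concurrency: carry the ETag from a read into the write so a concurrent change surfaces as 412 Precondition Failed rather than a lost update. A sketch; the surrounding client calls are hypothetical:

etag = '0x8D4BCC2E4835CD0'  # as returned in a prior response's ETag header
conditions = ModifiedAccessConditions(if_match=etag)
# blob_ops.set_metadata(..., modified_access_conditions=conditions)
# -> the service answers 412 if another writer changed the blob since the read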
-class PageList(Model):
-    """the list of pages.
-
-    :param page_range:
-    :type page_range: list[~azure.storage.blob.models.PageRange]
-    :param clear_range:
-    :type clear_range: list[~azure.storage.blob.models.ClearRange]
-    """
-
-    _attribute_map = {
-        'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}},
-        'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None:
-        super(PageList, self).__init__(**kwargs)
-        self.page_range = page_range
-        self.clear_range = clear_range
-
-
-class PageRange(Model):
-    """PageRange.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required.
-    :type start: long
-    :param end: Required.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'PageRange'
-    }
-
-    def __init__(self, *, start: int, end: int, **kwargs) -> None:
-        super(PageRange, self).__init__(**kwargs)
-        self.start = start
-        self.end = end
-
-
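Page blob ranges are 512-byte aligned with an inclusive end offset, so the first page is start=0, end=511. A helper that widens an arbitrary byte span to valid page boundaries; the alignment rule is the service's, the helper itself is illustrative:

PAGE = 512

def enclosing_page_range(offset, length):
    start = (offset // PAGE) * PAGE                  # round down to a page boundary
    end = -(-(offset + length) // PAGE) * PAGE - 1   # round up, then inclusive end
    return PageRange(start=start, end=end)

assert enclosing_page_range(100, 100).start == 0
assert enclosing_page_range(100, 100).end == 511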
-class RetentionPolicy(Model):
-    """the retention policy which determines how long the associated data should
-    persist.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the storage service
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics or logging or
-     soft-deleted data should be retained. All data older than this value will
-     be deleted
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = enabled
-        self.days = days
-
-
-class SequenceNumberAccessConditions(Model):
-    """Additional parameters for a set of operations, such as:
-    PageBlob_upload_pages, PageBlob_clear_pages,
-    PageBlob_upload_pages_from_url.
-
-    :param if_sequence_number_less_than_or_equal_to: Specify this header value
-     to operate only on a blob if it has a sequence number less than or equal
-     to the specified value.
-    :type if_sequence_number_less_than_or_equal_to: long
-    :param if_sequence_number_less_than: Specify this header value to operate
-     only on a blob if it has a sequence number less than the specified value.
-    :type if_sequence_number_less_than: long
-    :param if_sequence_number_equal_to: Specify this header value to operate
-     only on a blob if it has the specified sequence number.
-    :type if_sequence_number_equal_to: long
-    """
-
-    _attribute_map = {
-        'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
-        'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
-        'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None:
-        super(SequenceNumberAccessConditions, self).__init__(**kwargs)
-        self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to
-        self.if_sequence_number_less_than = if_sequence_number_less_than
-        self.if_sequence_number_equal_to = if_sequence_number_equal_to
-
-
-class SignedIdentifier(Model):
-    """signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. A unique id.
-    :type id: str
-    :param access_policy:
-    :type access_policy: ~azure.storage.blob.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-        'name': 'SignedIdentifier'
-    }
-
-    def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = id
-        self.access_policy = access_policy
-
-
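A stored access policy couples a container-scoped id with an AccessPolicy. AccessPolicy is defined elsewhere in this module, so its field names below are assumptions for illustration:

policy = SignedIdentifier(
    id='read-only-2019',  # unique within the container
    access_policy=AccessPolicy(
        start='2019-07-07T00:00:00Z',   # assumed ISO 8601 string fields
        expiry='2019-07-08T00:00:00Z',
        permission='r',
    ),
)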
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param source_if_modified_since: Specify this header value to operate only
-     on a blob if it has been modified since the specified date/time.
-    :type source_if_modified_since: datetime
-    :param source_if_unmodified_since: Specify this header value to operate
-     only on a blob if it has not been modified since the specified date/time.
-    :type source_if_unmodified_since: datetime
-    :param source_if_match: Specify an ETag value to operate only on blobs
-     with a matching value.
-    :type source_if_match: str
-    :param source_if_none_match: Specify an ETag value to operate only on
-     blobs without a matching value.
-    :type source_if_none_match: str
-    """
-
-    _attribute_map = {
-        'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}},
-        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}},
-        'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}},
-        'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, **kwargs) -> None:
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_modified_since = source_if_modified_since
-        self.source_if_unmodified_since = source_if_unmodified_since
-        self.source_if_match = source_if_match
-        self.source_if_none_match = source_if_none_match
-
-
-class StaticWebsite(Model):
-    """The properties that enable an account to host a static website.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether this account is hosting a
-     static website
-    :type enabled: bool
-    :param index_document: The default name of the index page under each
-     directory
-    :type index_document: str
-    :param error_document404_path: The absolute path of the custom 404 page
-    :type error_document404_path: str
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
-        'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, **kwargs) -> None:
-        super(StaticWebsite, self).__init__(**kwargs)
-        self.enabled = enabled
-        self.index_document = index_document
-        self.error_document404_path = error_document404_path
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, message: str=None, **kwargs) -> None:
-        super(StorageError, self).__init__(**kwargs)
-        self.message = message
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage Service Properties.
-
-    :param logging:
-    :type logging: ~azure.storage.blob.models.Logging
-    :param hour_metrics:
-    :type hour_metrics: ~azure.storage.blob.models.Metrics
-    :param minute_metrics:
-    :type minute_metrics: ~azure.storage.blob.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.blob.models.CorsRule]
-    :param default_service_version: The default version to use for requests to
-     the Blob service if an incoming request's version is not specified.
-     Possible values include version 2008-10-27 and all more recent versions.
-    :type default_service_version: str
-    :param delete_retention_policy:
-    :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
-    :param static_website:
-    :type static_website: ~azure.storage.blob.models.StaticWebsite
-    """
-
-    _attribute_map = {
-        'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-        'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
-        'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
-        'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None:
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.logging = logging
-        self.hour_metrics = hour_metrics
-        self.minute_metrics = minute_metrics
-        self.cors = cors
-        self.default_service_version = default_service_version
-        self.delete_retention_policy = delete_retention_policy
-        self.static_website = static_website
-
-
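All the analytics knobs above meet in this one payload. A sketch composing them for a service-properties update, built entirely from the constructors in this module; the set-properties call itself is hypothetical:

props = StorageServiceProperties(
    logging=Logging(version='1.0', delete=True, read=False, write=True,
                    retention_policy=RetentionPolicy(enabled=True, days=7)),
    hour_metrics=Metrics(enabled=True, include_apis=True,
                         retention_policy=RetentionPolicy(enabled=True, days=7)),
    static_website=StaticWebsite(enabled=True, index_document='index.html',
                                 error_document404_path='404.html'),
)
# service_ops.set_properties(props)  # hypothetical service-level call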
-class StorageServiceStats(Model):
-    """Stats for the storage service.
-
-    :param geo_replication:
-    :type geo_replication: ~azure.storage.blob.models.GeoReplication
-    """
-
-    _attribute_map = {
-        'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, geo_replication=None, **kwargs) -> None:
-        super(StorageServiceStats, self).__init__(**kwargs)
-        self.geo_replication = geo_replication
-
-
-class UserDelegationKey(Model):
-    """A user delegation key.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param signed_oid: Required. The Azure Active Directory object ID in GUID
-     format.
-    :type signed_oid: str
-    :param signed_tid: Required. The Azure Active Directory tenant ID in GUID
-     format
-    :type signed_tid: str
-    :param signed_start: Required. The date-time the key is active
-    :type signed_start: datetime
-    :param signed_expiry: Required. The date-time the key expires
-    :type signed_expiry: datetime
-    :param signed_service: Required. Abbreviation of the Azure Storage service
-     that accepts the key
-    :type signed_service: str
-    :param signed_version: Required. The service version that created the key
-    :type signed_version: str
-    :param value: Required. The key as a base64 string
-    :type value: str
-    """
-
-    _validation = {
-        'signed_oid': {'required': True},
-        'signed_tid': {'required': True},
-        'signed_start': {'required': True},
-        'signed_expiry': {'required': True},
-        'signed_service': {'required': True},
-        'signed_version': {'required': True},
-        'value': {'required': True},
-    }
-
-    _attribute_map = {
-        'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}},
-        'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}},
-        'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}},
-        'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}},
-        'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}},
-        'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}},
-        'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None:
-        super(UserDelegationKey, self).__init__(**kwargs)
-        self.signed_oid = signed_oid
-        self.signed_tid = signed_tid
-        self.signed_start = signed_start
-        self.signed_expiry = signed_expiry
-        self.signed_service = signed_service
-        self.signed_version = signed_version
-        self.value = value
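signed_start and signed_expiry deserialize as iso-8601 datetimes (see the attribute map above), so a cached key's validity window can be checked before reuse. A sketch assuming timezone-aware values, as msrest typically produces:

from datetime import datetime, timezone

def key_is_current(key):
    # Reject a cached delegation key outside its [signed_start, signed_expiry] window.
    now = datetime.now(timezone.utc)
    return key.signed_start <= now <= key.signed_expiry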
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,28 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations import ServiceOperations
-from ._container_operations import ContainerOperations
-from ._directory_operations import DirectoryOperations
-from ._blob_operations import BlobOperations
-from ._page_blob_operations import PageBlobOperations
-from ._append_blob_operations import AppendBlobOperations
-from ._block_blob_operations import BlockBlobOperations
-
-__all__ = [
-    'ServiceOperations',
-    'ContainerOperations',
-    'DirectoryOperations',
-    'BlobOperations',
-    'PageBlobOperations',
-    'AppendBlobOperations',
-    'BlockBlobOperations',
-]
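Each exported group takes the same four collaborators, matching the constructor shown in the next hunk. A sketch of how a generated client typically wires them up; the client shell here is illustrative, not the real generated class:

class BlobStorageClientShell:
    def __init__(self, pipeline_client, config, serializer, deserializer):
        # One instance per operation group, all sharing the pipeline and (de)serializers.
        self.service = ServiceOperations(pipeline_client, config, serializer, deserializer)
        self.container = ContainerOperations(pipeline_client, config, serializer, deserializer)
        self.append_blob = AppendBlobOperations(pipeline_client, config, serializer, deserializer)
        # ...and likewise for the remaining groups in __all__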
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_append_blob_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_append_blob_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_append_blob_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_append_blob_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,563 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class AppendBlobOperations(object):
-    """AppendBlobOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
-    :ivar comp: Constant value: "appendblock".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "AppendBlob"
-        self.comp = "appendblock"
-
-    def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Create Append Blob operation creates a new append blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}/{blob}'}
-
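create() returns None unless a cls callback is supplied; per the final lines of the method above, the callback receives the raw response, the (absent) deserialized body, and the parsed headers. A sketch capturing the ETag, assuming append_blob_ops came from a generated client:

headers = append_blob_ops.create(
    content_length=0,
    cls=lambda response, deserialized, response_headers: response_headers,
)
etag = headers['ETag']  # e.g. for later If-Match conditions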
-    def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Append Block operation commits a new block of data to the end of an
-        existing append blob. The Append Block operation is permitted only if
-        the blob was created with x-ms-blob-type set to AppendBlob. Append
-        Block is supported only on version 2015-02-21 or later.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param append_position_access_conditions: Additional parameters for
-         the operation
-        :type append_position_access_conditions:
-         ~azure.storage.blob.models.AppendPositionAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        max_size = None
-        if append_position_access_conditions is not None:
-            max_size = append_position_access_conditions.max_size
-        append_position = None
-        if append_position_access_conditions is not None:
-            append_position = append_position_access_conditions.append_position
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.append_block.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if max_size is not None:
-            header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
-        if append_position is not None:
-            header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    append_block.metadata = {'url': '/{containerName}/{blob}'}
-
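transactional_content_md5 must be the MD5 of exactly the bytes sent, passed as a bytearray per the docstring above. A sketch for a single-chunk append; the client wiring is assumed as in the earlier example:

import hashlib

chunk = b'appended data'
append_blob_ops.append_block(
    body=iter([chunk]),  # the body parameter is documented as a generator
    content_length=len(chunk),
    transactional_content_md5=bytearray(hashlib.md5(chunk).digest()),
)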
-    def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """The Append Block operation commits a new block of data to the end of an
-        existing append blob where the contents are read from a source url. The
-        Append Block operation is permitted only if the blob was created with
-        x-ms-blob-type set to AppendBlob. Append Block is supported only on
-        version 2015-02-21 or later.
-
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param append_position_access_conditions: Additional parameters for
-         the operation
-        :type append_position_access_conditions:
-         ~azure.storage.blob.models.AppendPositionAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        max_size = None
-        if append_position_access_conditions is not None:
-            max_size = append_position_access_conditions.max_size
-        append_position = None
-        if append_position_access_conditions is not None:
-            append_position = append_position_access_conditions.append_position
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.append_block_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if max_size is not None:
-            header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
-        if append_position is not None:
-            header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
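
For orientation, a sketch of driving append_block_from_url directly. The `AzureBlobStorage` class name, module path, account URLs, and the `append_blob` attribute are assumptions (in the public azure-storage-blob package this generated layer is wrapped by a hand-written AppendBlobClient); credential and pipeline setup are omitted:

from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage, models

# Assumed endpoint of an existing append blob.
client = AzureBlobStorage(url="https://account.blob.core.windows.net/container/log.txt")

# Server-side copy of the first 1 MiB of the source onto the end of the
# append blob, guarded so the write only lands at the expected offset (0).
conditions = models.AppendPositionAccessConditions(append_position=0)
client.append_blob.append_block_from_url(
    source_url="https://account.blob.core.windows.net/container/source.txt",
    content_length=0,  # the request itself carries no body for a URL copy
    source_range="bytes=0-1048575",
    append_position_access_conditions=conditions,
)
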
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_blob_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_blob_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_blob_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_blob_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,2442 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class BlobOperations(object):
-    """BlobOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_requires_sync: Constant value: "true".
-    :ivar x_ms_copy_action: Constant value: "abort".
-    :ivar restype: Constant value: "account".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_requires_sync = "true"
-        self.x_ms_copy_action = "abort"
-        self.restype = "account"
-
-    def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Download operation reads or downloads a blob from the system,
-        including its metadata and properties. You can also call Download to
-        read a snapshot.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Return only the bytes of the blob in the specified
-         range.
-        :type range: str
-        :param range_get_content_md5: When set to true and specified together
-         with the Range, the service returns the MD5 hash for the range, as
-         long as the range is less than or equal to 4 MB in size.
-        :type range_get_content_md5: bool
-        :param range_get_content_crc64: When set to true and specified
-         together with the Range, the service returns the CRC64 hash for the
-         range, as long as the range is less than or equal to 4 MB in size.
-        :type range_get_content_crc64: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.download.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
-        if range_get_content_crc64 is not None:
-            header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    download.metadata = {'url': '/{containerName}/{blob}'}
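
A sketch of calling this operation through the same assumed client as in the earlier sketch; `client.blob` as the attribute holding this operation group is likewise an assumption:

# Ranged download with per-range MD5 validation, then drain the stream.
stream = client.blob.download(range="bytes=0-1023", range_get_content_md5=True)
first_kib = b"".join(stream)  # stream_download() yields raw byte chunks
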
-
-    def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Get Properties operation returns all user-defined metadata,
-        standard HTTP properties, and system properties for the blob. It does
-        not return the content of the blob.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')),
-                'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')),
-                'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')),
-                'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')),
-                'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{containerName}/{blob}'}
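
Note that, as generated, this method returns None unless a `cls` hook is supplied; the typed headers assembled above are only reachable through that callback. A sketch with the same assumed client:

# Hand the deserialized header map straight back to the caller.
def keep_headers(http_response, deserialized, headers):
    return headers

props = client.blob.get_properties(cls=keep_headers)
print(props['Content-Length'], props['x-ms-blob-type'], props['ETag'])
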
-
-    def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """If the storage account's soft delete feature is disabled then, when a
-        blob is deleted, it is permanently removed from the storage account. If
-        the storage account's soft delete feature is enabled, then, when a blob
-        is deleted, it is marked for deletion and becomes inaccessible
-        immediately. However, the blob service retains the blob or snapshot for
-        the number of days specified by the DeleteRetentionPolicy section of
-        [Storage service properties] (Set-Blob-Service-Properties.md). After
-        the specified number of days has passed, the blob's data is permanently
-        removed from the storage account. Note that you continue to be charged
-        for the soft-deleted blob's storage until it is permanently removed.
-        Use the List Blobs API and specify the "include=deleted" query
-        parameter to discover which blobs and snapshots have been soft deleted.
-        You can then use the Undelete Blob API to restore a soft-deleted blob.
-        All other operations on a soft-deleted blob or snapshot causes the
-        service to return an HTTP status code of 404 (ResourceNotFound).
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param delete_snapshots: Required if the blob has associated
-         snapshots. Specify one of the following two options: include: Delete
-         the base blob and all of its snapshots. only: Delete only the blob's
-         snapshots and not the blob itself. Possible values include: 'include',
-         'only'
-        :type delete_snapshots: str or
-         ~azure.storage.blob.models.DeleteSnapshotsOptionType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if delete_snapshots is not None:
-            header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{containerName}/{blob}'}
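
A sketch of a lease-guarded delete that also removes snapshots (same assumed client; the lease id is a placeholder):

# 'include' deletes the base blob and all of its snapshots in one call.
lease = models.LeaseAccessConditions(lease_id="00000000-0000-0000-0000-000000000000")
client.blob.delete(delete_snapshots="include", lease_access_conditions=lease)
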
-
-    def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_acl: Sets POSIX access control rights on files and
-         directories. The value is a comma-separated list of access control
-         entries. Each access control entry (ACE) consists of a scope, a type,
-         a user or group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type posix_acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
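
A sketch of the corresponding call, meaningful only on a Hierarchical-Namespace-enabled account (same assumed client; the owner value is a placeholder):

# Set the owner and symbolic POSIX permissions, per the docstring above.
client.blob.set_access_control(owner="$superuser", posix_permissions="rwxr-x---")
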
-
-    def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Get the owner, group, permissions, or access control list for a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the identity values returned in
-         the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "getAccessControl"
-
-        # Construct URL
-        url = self.get_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    get_access_control.metadata = {'url': '/{filesystem}/{path}'}
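-
-    # Hedged usage sketch, not part of the generated code: get_access_control
-    # issues a HEAD request, so the ACL comes back only in response headers
-    # (x-ms-acl, x-ms-owner, x-ms-group, x-ms-permissions). Assuming
-    # `path_ops` is an instance of this operations class, a `cls` callback is
-    # the way to capture those headers:
-    #
-    #     headers = path_ops.get_access_control(
-    #         upn=True, cls=lambda resp, body, hdrs: hdrs)
-    #     acl = headers['x-ms-acl']  # e.g. "user::rwx,group::r-x,other::---"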
-
-    def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """Rename a blob/file.  By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param rename_source: The file or directory to be renamed. The value
-         must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved.
-        :type rename_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_rename_mode: Determines the behavior of the rename
-         operation. Possible values include: 'legacy', 'posix'
-        :type path_rename_mode: str or
-         ~azure.storage.blob.models.PathRenameMode
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts the permission settings for
-         the file or directory, and is applied only when a default ACL does
-         not exist in the parent directory. When a bit in the umask is set,
-         the corresponding permission is disabled; otherwise it is taken
-         from the requested permissions. A 4-digit octal notation (e.g.
-         0022) is supported. If no umask is specified, a default umask of
-         0027 is used.
-        :type posix_umask: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.rename.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if path_rename_mode is not None:
-            query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    rename.metadata = {'url': '/{filesystem}/{path}'}
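-
-    # Hedged usage sketch (assumes `path_ops` is an instance of this class
-    # and `models` is the matching generated models module): per the
-    # docstring, a rename that must fail rather than overwrite an existing
-    # destination passes If-None-Match: "*" via ModifiedAccessConditions.
-    #
-    #     mac = models.ModifiedAccessConditions(if_none_match='*')
-    #     path_ops.rename(
-    #         rename_source='/myfilesystem/old-name',  # "/{filesystem}/{path}"
-    #         posix_permissions='rwxr-x---',           # symbolic form accepted
-    #         modified_access_conditions=mac)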
-
-    def undelete(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """Undelete a blob that was previously soft deleted.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "undelete"
-
-        # Construct URL
-        url = self.undelete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    undelete.metadata = {'url': '/{containerName}/{blob}'}
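-
-    # Hedged usage sketch: undelete takes no required arguments; the target
-    # blob is identified by the URL the client was configured with. Assuming
-    # `blob_ops` is an instance of this class:
-    #
-    #     blob_ops.undelete(timeout=30, request_id='restore-correlation-id')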
-
-    def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Set HTTP Headers operation sets system properties on the blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_http_headers.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
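-
-    # Hedged usage sketch (assumes `blob_ops` and the generated `models`
-    # module): the individual x-ms-blob-* headers are grouped into a single
-    # BlobHTTPHeaders object rather than passed as separate keywords.
-    #
-    #     headers = models.BlobHTTPHeaders(
-    #         blob_content_type='application/json',
-    #         blob_cache_control='max-age=3600')
-    #     blob_ops.set_http_headers(blob_http_headers=headers)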
-
-    def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Set Blob Metadata operation sets user-defined metadata for the
-        specified blob as one or more name-value pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies user-defined name-value pairs
-         associated with the blob. If no name-value pairs are specified, the
-         operation removes all existing metadata from the blob. Note that
-         beginning with version 2009-09-19, metadata names must adhere to
-         the naming rules for C# identifiers. See Naming and Referencing
-         Containers, Blobs, and Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{containerName}/{blob}'}
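-
-    # Hedged usage sketch: note that `metadata` here is the raw x-ms-meta
-    # header value (a str), unlike the dict accepted by the higher-level
-    # convenience client. Assuming `blob_ops`, `models`, and an active
-    # lease ID:
-    #
-    #     lac = models.LeaseAccessConditions(lease_id=lease_id)
-    #     blob_ops.set_metadata(metadata='project=alpha',
-    #                           lease_access_conditions=lac)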
-
-    def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
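-
-    # Hedged usage sketch: a 30-second lease with a caller-proposed GUID;
-    # the granted lease ID is returned in the x-ms-lease-id header, so a
-    # `cls` callback is needed to read it. Assuming `blob_ops` as above:
-    #
-    #     import uuid
-    #     headers = blob_ops.acquire_lease(
-    #         duration=30, proposed_lease_id=str(uuid.uuid4()),
-    #         cls=lambda resp, body, hdrs: hdrs)
-    #     lease_id = headers['x-ms-lease-id']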
-
-    def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{containerName}/{blob}'}
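-
-    # Hedged usage sketch, continuing from the acquire example: releasing
-    # needs only the currently held lease ID.
-    #
-    #     blob_ops.release_lease(lease_id)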
-
-    def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "renew"
-
-        # Construct URL
-        url = self.renew_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    renew_lease.metadata = {'url': '/{containerName}/{blob}'}
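-
-    # Hedged usage sketch: renewing restarts the originally acquired
-    # duration; per the acquire docstring, the duration itself cannot be
-    # changed by renew or change.
-    #
-    #     blob_ops.renew_lease(lease_id)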
-
-    def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{containerName}/{blob}'}
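-
-    # Hedged usage sketch: change swaps the active lease ID for a new
-    # caller-proposed GUID without interrupting the lease.
-    #
-    #     new_id = str(uuid.uuid4())
-    #     blob_ops.change_lease(lease_id, proposed_lease_id=new_id)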
-
-    def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease Blob operation establishes and manages a lock on a
-        blob for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param break_period: For a break operation, proposed duration the
-         lease should continue before it is broken, in seconds, between 0 and
-         60. This break period is only used if it is shorter than the time
-         remaining on the lease. If longer, the time remaining on the lease is
-         used. A new lease will not be available before the break period has
-         expired, but the lease may be held for longer than the break period.
-         If this header does not appear with a break operation, a
-         fixed-duration lease breaks after the remaining lease period elapses,
-         and an infinite lease breaks immediately.
-        :type break_period: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "lease"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{containerName}/{blob}'}
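# Hedged sketch: break the current lease and ask the service to let it lapse
# within at most ten seconds (client wiring assumed, as in the earlier sketch).
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/b?<sas>")
client.blob.break_lease(break_period=10)  # service answers 202 with x-ms-lease-time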
-
-    def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Create Snapshot operation creates a read-only snapshot of a blob.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "snapshot"
-
-        # Construct URL
-        url = self.create_snapshot.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_snapshot.metadata = {'url': '/{containerName}/{blob}'}
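# The cls hook is handed (response, deserialized_body, response_headers) -- see
# `return cls(response, None, response_headers)` above -- which makes it the
# natural way to read the new snapshot id. Client wiring assumed, as before.
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/b?<sas>")
headers = client.blob.create_snapshot(cls=lambda resp, body, hdrs: hdrs)
snapshot_id = headers['x-ms-snapshot']  # opaque value that names the snapshot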
-
-    def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Start Copy From URL operation copies a blob or an internet resource
-        to a new blob.
-
-        :param copy_source: Specifies the name of the source page blob
-         snapshot. This value is a URL of up to 2 KB in length that specifies a
-         page blob snapshot. The value should be URL-encoded as it would appear
-         in a request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param rehydrate_priority: Optional: Indicates the priority with which
-         to rehydrate an archived blob. Possible values include: 'High',
-         'Standard'
-        :type rehydrate_priority: str or
-         ~azure.storage.blob.models.RehydratePriority
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.start_copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if rehydrate_priority is not None:
-            header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
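# start_copy_from_url returns 202 and the copy runs asynchronously; capturing
# x-ms-copy-id lets the copy be polled or aborted later. Wiring assumed.
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/dst?<sas>")
headers = client.blob.start_copy_from_url(
    copy_source="https://otheraccount.blob.core.windows.net/c/src?<sas>",
    cls=lambda resp, body, hdrs: hdrs)
copy_id = headers['x-ms-copy-id']
# Poll x-ms-copy-status until it is no longer 'pending'.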
-
-    def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Copy From URL operation copies a blob or an internet resource to a
-        new blob. It will not return a response until the copy is complete.
-
-        :param copy_source: Specifies the name of the source page blob
-         snapshot. This value is a URL of up to 2 KB in length that specifies a
-         page blob snapshot. The value should be URL-encoded as it would appear
-         in a request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
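# Unlike start_copy_from_url, this operation is synchronous: the constant
# x-ms-requires-sync header serialized above tells the service to finish the
# copy before answering, so no polling loop is needed. Wiring assumed.
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/dst?<sas>")
client.blob.copy_from_url(copy_source="https://otheraccount.blob.core.windows.net/c/src?<sas>")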
-
-    def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Abort Copy From URL operation aborts a pending Copy From URL
-        operation, and leaves a destination blob with zero length and full
-        metadata.
-
-        :param copy_id: The copy identifier provided in the x-ms-copy-id
-         header of the original Copy Blob operation.
-        :type copy_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "copy"
-
-        # Construct URL
-        url = self.abort_copy_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
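# Aborting needs the x-ms-copy-id from the original Start Copy call; per the
# docstring, the destination is left zero-length with full metadata. A hedged
# end-to-end sketch (wiring assumed):
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/dst?<sas>")
headers = client.blob.start_copy_from_url(
    copy_source="https://otheraccount.blob.core.windows.net/c/src?<sas>",
    cls=lambda resp, body, hdrs: hdrs)
client.blob.abort_copy_from_url(copy_id=headers['x-ms-copy-id'])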
-
-    def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Set Tier operation sets the tier on a blob. The operation is
-        allowed on a page blob in a premium storage account and on a block blob
-        in a blob storage account (locally redundant storage only). A premium
-        page blob's tier determines the allowed size, IOPS, and bandwidth of
-        the blob. A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param tier: Indicates the tier to be set on the blob. Possible values
-         include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60',
-         'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierRequired
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param rehydrate_priority: Optional. Indicates the priority with which
-         to rehydrate an archived blob. Possible values include: 'High',
-         'Standard'
-        :type rehydrate_priority: str or
-         ~azure.storage.blob.models.RehydratePriority
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "tier"
-
-        # Construct URL
-        url = self.set_tier.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if rehydrate_priority is not None:
-            header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_tier.metadata = {'url': '/{containerName}/{blob}'}
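# set_tier accepts both 200 (the tier change is immediate) and 202 (e.g. a
# rehydration out of Archive has merely been queued), as the status check
# above shows. Hedged sketch, wiring assumed:
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/b?<sas>")
client.blob.set_tier(tier="Cool")
client.blob.set_tier(tier="Hot", rehydrate_priority="High")  # speed up rehydration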
-
-    def get_account_info(self, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/{containerName}/{blob}'}
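# get_account_info reports account-level facts only through response headers,
# so the cls hook is again the way to surface them. Wiring assumed, as above.
from azure.multiapi.storagev2.blob.v2019_07_07._generated import AzureBlobStorage

client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/b?<sas>")
info = client.blob.get_account_info(cls=lambda resp, body, hdrs: hdrs)
sku_name, account_kind = info['x-ms-sku-name'], info['x-ms-account-kind']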
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_block_blob_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_block_blob_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_block_blob_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_block_blob_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,802 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class BlockBlobOperations(object):
-    """BlockBlobOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "BlockBlob"
-
-    def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Upload Block Blob operation updates the content of an existing
-        block blob. Updating an existing block blob overwrites any existing
-        metadata on the blob. Partial updates are not supported with Put Blob;
-        the content of the existing blob is overwritten with the content of the
-        new blob. To perform a partial update of the content of a block blob,
-        use the Put Block List operation.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.upload.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload.metadata = {'url': '/{containerName}/{blob}'}
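The tail of ``upload`` above shows the pattern every generated operation in this file repeats: each optional access condition becomes a header only when it is set, and datetimes are serialized in RFC-1123 form. A stdlib-only sketch of that pattern (the helper name ``conditional_headers`` is ours, not the SDK's)::

    from datetime import datetime, timezone
    from email.utils import format_datetime

    def conditional_headers(if_modified_since=None, if_unmodified_since=None,
                            if_match=None, if_none_match=None):
        """Build optional conditional request headers, skipping unset values."""
        headers = {}
        if if_modified_since is not None:
            # RFC-1123 form, e.g. 'Wed, 18 Jun 2025 00:00:00 GMT'
            headers['If-Modified-Since'] = format_datetime(if_modified_since, usegmt=True)
        if if_unmodified_since is not None:
            headers['If-Unmodified-Since'] = format_datetime(if_unmodified_since, usegmt=True)
        if if_match is not None:
            headers['If-Match'] = if_match
        if if_none_match is not None:
            headers['If-None-Match'] = if_none_match
        return headers

    print(conditional_headers(if_modified_since=datetime(2025, 6, 18, tzinfo=timezone.utc)))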
-
-    def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, cls=None, **kwargs):
-        """The Stage Block operation creates a new block to be committed as part
-        of a blob.
-
-        :param block_id: A valid Base64 string value that identifies the
-         block. Prior to encoding, the string must be less than or equal to 64
-         bytes in size. For a given blob, the length of the value specified for
-         the blockid parameter must be the same size for each block.
-        :type block_id: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param body: Initial data
-        :type body: Generator
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-
-        comp = "block"
-
-        # Construct URL
-        url = self.stage_block.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    stage_block.metadata = {'url': '/{containerName}/{blob}'}
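As the ``block_id`` docstring above notes, every block ID in a blob must be a Base64 string of the same encoded length. A common way to satisfy that is to encode a zero-padded counter; a small illustrative sketch (``make_block_id`` is a hypothetical helper, not SDK API)::

    import base64

    def make_block_id(index, width=6):
        """Zero-pad the index so every encoded ID has the same length."""
        return base64.b64encode(f"{index:0{width}d}".encode()).decode()

    ids = [make_block_id(i) for i in range(3)]
    assert len({len(i) for i in ids}) == 1  # uniform length, as required
    print(ids)  # ['MDAwMDAw', 'MDAwMDAx', 'MDAwMDAy']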
-
-    def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """The Stage Block operation creates a new block to be committed as part
-        of a blob where the contents are read from a URL.
-
-        :param block_id: A valid Base64 string value that identifies the
-         block. Prior to encoding, the string must be less than or equal to 64
-         bytes in size. For a given blob, the length of the value specified for
-         the blockid parameter must be the same size for each block.
-        :type block_id: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        comp = "block"
-
-        # Construct URL
-        url = self.stage_block_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
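``source_range`` above is passed as a plain string; for the Blob service a byte range is written as ``bytes=<start>-<end>`` with an inclusive end. A tiny sketch of building one (the helper name is ours)::

    def format_source_range(start, length):
        """Inclusive byte range, e.g. bytes=0-4194303 for a 4 MiB block."""
        return f"bytes={start}-{start + length - 1}"

    assert format_source_range(0, 4 * 1024 * 1024) == "bytes=0-4194303"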
-
-    def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Commit Block List operation writes a blob by specifying the list of
-        block IDs that make up the blob. In order to be written as part of a
-        blob, a block must have been successfully written to the server in a
-        prior Put Block operation. You can call Put Block List to update a blob
-        by uploading only those blocks that have changed, then committing the
-        new and existing blocks together. You can do this by specifying whether
-        to commit a block from the committed block list or from the uncommitted
-        block list, or to commit the most recently uploaded version of the
-        block, whichever list it may belong to.
-
-        :param blocks:
-        :type blocks: ~azure.storage.blob.models.BlockLookupList
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param tier: Optional. Indicates the tier to be set on the blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
-        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "blocklist"
-
-        # Construct URL
-        url = self.commit_block_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(blocks, 'BlockLookupList')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    commit_block_list.metadata = {'url': '/{containerName}/{blob}'}
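The ``BlockLookupList`` serialized above becomes the XML body of the Put Block List request, where each ID is tagged ``Committed``, ``Uncommitted``, or ``Latest`` to say which list the service should take it from. A stdlib sketch of the common case, committing the most recently uploaded version of each block (element names follow the documented REST payload, not the SDK serializer)::

    import xml.etree.ElementTree as ET

    def block_list_body(latest_ids):
        """Wrap each block ID in <Latest> inside a <BlockList> root."""
        root = ET.Element("BlockList")
        for block_id in latest_ids:
            ET.SubElement(root, "Latest").text = block_id
        return ET.tostring(root, encoding="unicode")

    print(block_list_body(["MDAwMDAw", "MDAwMDAx"]))
    # <BlockList><Latest>MDAwMDAw</Latest><Latest>MDAwMDAx</Latest></BlockList>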
-
-    def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """The Get Block List operation retrieves the list of blocks that have
-        been uploaded as part of a block blob.
-
-        :param list_type: Specifies whether to return the list of committed
-         blocks, the list of uncommitted blocks, or both lists together.
-         Possible values include: 'committed', 'uncommitted', 'all'
-        :type list_type: str or ~azure.storage.blob.models.BlockListType
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: BlockList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.BlockList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "blocklist"
-
-        # Construct URL
-        url = self.get_block_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('BlockList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_block_list.metadata = {'url': '/{containerName}/{blob}'}
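``get_block_list`` is the one operation in this group that deserializes a response body rather than just headers. The body splits blocks into committed and uncommitted groups; a rough stdlib sketch of reading such a payload (the sample XML is ours, shaped after the documented response)::

    import xml.etree.ElementTree as ET

    SAMPLE = """<BlockList>
      <CommittedBlocks>
        <Block><Name>MDAwMDAw</Name><Size>1024</Size></Block>
      </CommittedBlocks>
      <UncommittedBlocks>
        <Block><Name>MDAwMDAx</Name><Size>2048</Size></Block>
      </UncommittedBlocks>
    </BlockList>"""

    root = ET.fromstring(SAMPLE)
    for group in ("CommittedBlocks", "UncommittedBlocks"):
        for block in root.iterfind(f"{group}/Block"):
            print(group, block.findtext("Name"), int(block.findtext("Size")))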
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_container_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_container_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_container_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_container_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1327 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ContainerOperations(object):
-    """ContainerOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, cls=None, **kwargs):
-        """creates a new container under the specified account. If the container
-        with the same name already exists, the operation fails.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param access: Specifies whether data in the container may be accessed
-         publicly and the level of access. Possible values include:
-         'container', 'blob'
-        :type access: str or ~azure.storage.blob.models.PublicAccessType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param container_cpk_scope_info: Additional parameters for the
-         operation
-        :type container_cpk_scope_info:
-         ~azure.storage.blob.models.ContainerCpkScopeInfo
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        default_encryption_scope = None
-        if container_cpk_scope_info is not None:
-            default_encryption_scope = container_cpk_scope_info.default_encryption_scope
-        prevent_encryption_scope_override = None
-        if container_cpk_scope_info is not None:
-            prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
-
-        restype = "container"
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if access is not None:
-            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if default_encryption_scope is not None:
-            header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str')
-        if prevent_encryption_scope_override is not None:
-            header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}'}
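As ``create`` shows, the container endpoint is just the account URL plus the container name, with ``restype=container`` marking the request as container-level. A stdlib sketch of the URL construction (the account URL is a placeholder)::

    from urllib.parse import urlencode

    def container_create_url(account_url, container_name, timeout=None):
        """A PUT to this URL with restype=container creates the container."""
        query = {"restype": "container"}
        if timeout is not None:
            query["timeout"] = timeout
        return f"{account_url}/{container_name}?{urlencode(query)}"

    print(container_create_url("https://myaccount.blob.core.windows.net", "logs", timeout=30))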
-
-    def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """returns all user-defined metadata and system properties for the
-        specified container. The data returned does not include the container's
-        list of blobs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        restype = "container"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
-                'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')),
-                'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')),
-                'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')),
-                'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{containerName}'}
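The ``'{str}'`` deserialization of ``x-ms-meta`` above reflects how metadata travels on the wire: each pair is its own ``x-ms-meta-<name>`` response header. A rough sketch of collapsing those headers back into a dict (the sample headers are ours)::

    def parse_metadata(headers):
        """Collect x-ms-meta-<name> response headers into a plain dict."""
        prefix = "x-ms-meta-"
        return {name[len(prefix):]: value
                for name, value in headers.items()
                if name.lower().startswith(prefix)}

    print(parse_metadata({"x-ms-meta-owner": "alice", "ETag": '"0x8D"'}))
    # {'owner': 'alice'}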
-
-    def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """operation marks the specified container for deletion. The container and
-        any blobs contained within it are later deleted during garbage
-        collection.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        restype = "container"
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{containerName}'}
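``delete`` ends with the same status check as every other operation here: anything outside the single documented success code (202 in this case) is first run through the caller-supplied ``error_map`` and otherwise raised as a storage error. A simplified sketch of that dispatch (``StorageError`` is a stand-in for the SDK's ``StorageErrorException``)::

    class StorageError(Exception):
        """Stand-in for the SDK's StorageErrorException."""

    def check_status(status_code, expected, error_map=None):
        if status_code in expected:
            return
        # error_map lets callers substitute their own exception per status code
        exc = (error_map or {}).get(status_code, StorageError)
        raise exc(f"unexpected status {status_code}")

    check_status(202, [202])  # success, no exception
    try:
        check_status(404, [202], error_map={404: KeyError})
    except KeyError as err:
        print("mapped:", err)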
-
-    def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """operation sets one or more user-defined name-value pairs for the
-        specified container.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-
-        restype = "container"
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{containerName}'}
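
The request this generated method assembles is equivalent to the raw REST
call below, shown as a minimal sketch with the third-party requests library.
The account URL, container name, SAS token, and metadata values are
placeholder assumptions; on the wire each user-defined pair travels as an
``x-ms-meta-<name>`` header::

    import requests

    # Placeholders: substitute a real container URL and a SAS token that
    # grants write access to the container.
    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    resp = requests.put(
        container_url,
        params={"restype": "container", "comp": "metadata", "timeout": 30},
        headers={
            "x-ms-meta-category": "archive",  # one x-ms-meta-<name> header per pair
            "x-ms-version": "2019-07-07",
        },
    )
    resp.raise_for_status()  # the method above accepts only HTTP 200
    print(resp.headers.get("ETag"), resp.headers.get("Last-Modified"))
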
-
-    def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """gets the permissions for the specified container. The permissions
-        indicate whether container data may be accessed publicly.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.blob.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        restype = "container"
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{containerName}'}
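
The equivalent raw call is a GET with restype=container&comp=acl: the
public-access level comes back in a response header, and the stored access
policies arrive as a SignedIdentifiers XML body. A minimal sketch with the
requests library; the container URL and SAS token are placeholders::

    import requests
    import xml.etree.ElementTree as ET

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    resp = requests.get(
        container_url,
        params={"restype": "container", "comp": "acl"},
        headers={"x-ms-version": "2019-07-07", "Accept": "application/xml"},
    )
    resp.raise_for_status()
    print("public access:", resp.headers.get("x-ms-blob-public-access"))

    # Each <SignedIdentifier> carries an Id plus an optional <AccessPolicy>.
    for ident in ET.fromstring(resp.content).findall("SignedIdentifier"):
        policy = ident.find("AccessPolicy")
        print(ident.findtext("Id"),
              policy.findtext("Permission") if policy is not None else None)
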
-
-    def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """sets the permissions for the specified container. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param container_acl: the acls for the container
-        :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param access: Specifies whether data in the container may be accessed
-         publicly and the level of access. Possible values include:
-         'container', 'blob'
-        :type access: str or ~azure.storage.blob.models.PublicAccessType
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        restype = "container"
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        if access is not None:
-            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}}
-        if container_acl is not None:
-            body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{containerName}'}
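
The matching raw request is a PUT of a SignedIdentifiers XML document to
restype=container&comp=acl, with the optional public-access level sent as a
header. A sketch with the requests library; the URL, SAS token, policy id,
and dates are placeholder assumptions::

    import requests

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    # Hand-built body; the generated client renders the same XML from a
    # list of SignedIdentifier models.
    body = (
        '<?xml version="1.0" encoding="utf-8"?>'
        "<SignedIdentifiers><SignedIdentifier>"
        "<Id>read-only-policy</Id>"
        "<AccessPolicy>"
        "<Start>2020-01-01T00:00:00Z</Start>"
        "<Expiry>2021-01-01T00:00:00Z</Expiry>"
        "<Permission>rl</Permission>"
        "</AccessPolicy>"
        "</SignedIdentifier></SignedIdentifiers>"
    )

    resp = requests.put(
        container_url,
        params={"restype": "container", "comp": "acl"},
        headers={
            "Content-Type": "application/xml; charset=utf-8",
            "x-ms-blob-public-access": "blob",  # optional: 'container' or 'blob'
            "x-ms-version": "2019-07-07",
        },
        data=body,
    )
    resp.raise_for_status()  # 200 on success
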
-
-    def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{containerName}'}
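
In raw form, acquiring the lease is a PUT to comp=lease&restype=container
with x-ms-lease-action: acquire; on success the service answers 201 and the
new lease ID arrives in the x-ms-lease-id header. A sketch with the requests
library, using a placeholder URL and SAS token::

    import requests

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    resp = requests.put(
        container_url,
        params={"comp": "lease", "restype": "container"},
        headers={
            "x-ms-lease-action": "acquire",
            "x-ms-lease-duration": "15",  # 15-60 seconds, or -1 for infinite
            "x-ms-version": "2019-07-07",
        },
    )
    assert resp.status_code == 201, resp.text  # the method above expects 201
    lease_id = resp.headers["x-ms-lease-id"]
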
-
-    def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{containerName}'}
-
-    def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "renew"
-
-        # Construct URL
-        url = self.renew_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    renew_lease.metadata = {'url': '/{containerName}'}
-
-    def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param break_period: For a break operation, proposed duration the
-         lease should continue before it is broken, in seconds, between 0 and
-         60. This break period is only used if it is shorter than the time
-         remaining on the lease. If longer, the time remaining on the lease is
-         used. A new lease will not be available before the break period has
-         expired, but the lease may be held for longer than the break period.
-         If this header does not appear with a break operation, a
-         fixed-duration lease breaks after the remaining lease period elapses,
-         and an infinite lease breaks immediately.
-        :type break_period: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{containerName}'}
-
-    def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """[Update] establishes and manages a lock on a container for delete
-        operations. The lock duration can be 15 to 60 seconds, or can be
-        infinite.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        comp = "lease"
-        restype = "container"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{containerName}'}
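
release_lease, renew_lease, break_lease, and change_lease all share this
request shape and differ only in the x-ms-lease-action value and one or two
headers, so a single helper can drive any of them. A sketch with the
requests library; the URL, SAS token, and lease IDs are placeholders::

    import requests

    def container_lease_action(container_url, action, lease_id=None,
                               proposed_lease_id=None, break_period=None):
        # Mirrors the request the generated lease methods construct.
        headers = {"x-ms-lease-action": action, "x-ms-version": "2019-07-07"}
        if lease_id is not None:
            headers["x-ms-lease-id"] = lease_id
        if proposed_lease_id is not None:
            headers["x-ms-proposed-lease-id"] = proposed_lease_id
        if break_period is not None:
            headers["x-ms-lease-break-period"] = str(break_period)
        resp = requests.put(container_url,
                            params={"comp": "lease", "restype": "container"},
                            headers=headers)
        resp.raise_for_status()  # 200 for release/renew/change, 202 for break
        return resp

    url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"
    container_lease_action(url, "renew", lease_id="<current-lease-id>")
    container_lease_action(url, "break", break_period=0)
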
-
-    def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """[Update] The List Blobs operation returns a list of the blobs under the
-        specified container.
-
-        :param prefix: Filters the results to return only blobs whose
-         names begin with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of blobs to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all blobs remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of blobs to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.blob.models.ListBlobsIncludeItem]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListBlobsFlatSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "container"
-        comp = "list"
-
-        # Construct URL
-        url = self.list_blob_flat_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_blob_flat_segment.metadata = {'url': '/{containerName}'}
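
The NextMarker element described above drives pagination: re-issue the
request with marker set to the previous NextMarker until it comes back
empty. A sketch with requests and ElementTree, using a placeholder URL and
SAS token::

    import requests
    import xml.etree.ElementTree as ET

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    marker = None
    while True:
        params = {"restype": "container", "comp": "list", "maxresults": 1000}
        if marker:
            params["marker"] = marker
        resp = requests.get(container_url, params=params,
                            headers={"x-ms-version": "2019-07-07"})
        resp.raise_for_status()
        root = ET.fromstring(resp.content)
        for blob in root.iter("Blob"):
            print(blob.findtext("Name"))
        marker = root.findtext("NextMarker")  # empty once the listing is done
        if not marker:
            break
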
-
-    def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """[Update] The List Blobs operation returns a list of the blobs under the
-        specified container.
-
-        :param delimiter: When the request includes this parameter, the
-         operation returns a BlobPrefix element in the response body that acts
-         as a placeholder for all blobs whose names begin with the same
-         substring up to the appearance of the delimiter character. The
-         delimiter may be a single character or a string.
-        :type delimiter: str
-        :param prefix: Filters the results to return only blobs whose
-         names begin with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of blobs to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all blobs remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of blobs to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.blob.models.ListBlobsIncludeItem]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListBlobsHierarchySegmentResponse or the result of
-         cls(response)
-        :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "container"
-        comp = "list"
-
-        # Construct URL
-        url = self.list_blob_hierarchy_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
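
With a delimiter, names that share a prefix up to the delimiter collapse
into BlobPrefix elements, which makes the flat namespace look like
directories. A sketch listing one virtual folder level; the URL, SAS token,
and prefix are placeholders::

    import requests
    import xml.etree.ElementTree as ET

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    resp = requests.get(
        container_url,
        params={"restype": "container", "comp": "list",
                "delimiter": "/", "prefix": "photos/"},
        headers={"x-ms-version": "2019-07-07"},
    )
    resp.raise_for_status()
    root = ET.fromstring(resp.content)
    for p in root.iter("BlobPrefix"):   # virtual sub-directories
        print("prefix:", p.findtext("Name"))
    for b in root.iter("Blob"):         # blobs directly at this level
        print("blob:  ", b.findtext("Name"))
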
-
-    def get_account_info(self, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "account"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/{containerName}'}
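
Unusually for a GET, this operation returns its payload entirely in response
headers; there is no body to deserialize. A raw-call sketch with a
placeholder URL and SAS token::

    import requests

    container_url = "https://myaccount.blob.core.windows.net/mycontainer?<sas-token>"

    resp = requests.get(
        container_url,
        params={"restype": "account", "comp": "properties"},
        headers={"x-ms-version": "2019-07-07"},
    )
    resp.raise_for_status()
    print("sku: ", resp.headers.get("x-ms-sku-name"))
    print("kind:", resp.headers.get("x-ms-account-kind"))
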
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_directory_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_directory_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_directory_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_directory_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,740 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class DirectoryOperations(object):
-    """DirectoryOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar resource: Constant value: "directory".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.resource = "directory"
-
-    def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Create a directory. By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts permission settings for the file
-         or directory, and is applied only when a default ACL does not exist in
-         the parent directory. If a umask bit is set, the corresponding
-         permission is disabled; otherwise it is determined by the permission
-         setting. A 4-digit octal notation (e.g. 0022) is supported. If no
-         umask is specified, a default umask of 0027 is used.
-        :type posix_umask: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}/{path}'}
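-
-    # Editor's sketch (not part of the generated file): how `create` might be invoked
-    # through a client that has this DirectoryOperations instance attached as
-    # `directory_ops` (a hypothetical name; the class is not constructed directly, as
-    # noted above). Passing If-None-Match: "*" via ModifiedAccessConditions makes the
-    # call fail when the directory already exists, per the docstring.
-    #
-    #     conditions = models.ModifiedAccessConditions(if_none_match='*')
-    #     directory_ops.create(
-    #         timeout=30,
-    #         posix_permissions='rwxr-x---',   # symbolic form; 4-digit octal also works
-    #         posix_umask='0027',              # the documented default umask
-    #         modified_access_conditions=conditions,
-    #     )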
-
-    def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """Rename a directory. By default, the destination is overwritten and if
-        the destination already exists and has a lease the lease is broken.
-        This operation supports conditional HTTP requests. For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param rename_source: The file or directory to be renamed. The value
-         must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved.
-        :type rename_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param marker: When renaming a directory, the number of paths that are
-         renamed with each invocation is limited.  If the number of paths to be
-         renamed exceeds this limit, a continuation token is returned in this
-         response header.  When a continuation token is returned in the
-         response, it must be specified in a subsequent invocation of the
-         rename operation to continue renaming the directory.
-        :type marker: str
-        :param path_rename_mode: Determines the behavior of the rename
-         operation. Possible values include: 'legacy', 'posix'
-        :type path_rename_mode: str or
-         ~azure.storage.blob.models.PathRenameMode
-        :param directory_properties: Optional.  User-defined properties to be
-         stored with the file or directory, in the format of a comma-separated
-         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
-         base64 encoded.
-        :type directory_properties: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_umask: Only valid if Hierarchical Namespace is enabled
-         for the account. This umask restricts permission settings for the file
-         or directory, and is applied only when a default ACL does not exist in
-         the parent directory. If a umask bit is set, the corresponding
-         permission is disabled; otherwise it is determined by the permission
-         setting. A 4-digit octal notation (e.g. 0022) is supported. If no
-         umask is specified, a default umask of 0027 is used.
-        :type posix_umask: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param directory_http_headers: Additional parameters for the operation
-        :type directory_http_headers:
-         ~azure.storage.blob.models.DirectoryHttpHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if directory_http_headers is not None:
-            cache_control = directory_http_headers.cache_control
-        content_type = None
-        if directory_http_headers is not None:
-            content_type = directory_http_headers.content_type
-        content_encoding = None
-        if directory_http_headers is not None:
-            content_encoding = directory_http_headers.content_encoding
-        content_language = None
-        if directory_http_headers is not None:
-            content_language = directory_http_headers.content_language
-        content_disposition = None
-        if directory_http_headers is not None:
-            content_disposition = directory_http_headers.content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        # Construct URL
-        url = self.rename.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
-        if path_rename_mode is not None:
-            query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if directory_properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    rename.metadata = {'url': '/{filesystem}/{path}'}
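-
-    # Editor's sketch: renaming a large directory may return an x-ms-continuation
-    # header, which must be passed back as `marker` until the operation completes. The
-    # `cls` hook surfaces that header; `directory_ops` is the same hypothetical
-    # attached instance as in the sketch above.
-    #
-    #     def continuation(response, deserialized, headers):
-    #         return headers.get('x-ms-continuation')
-    #
-    #     marker = None
-    #     while True:
-    #         marker = directory_ops.rename('/myfilesystem/src-dir',
-    #                                       marker=marker, cls=continuation)
-    #         if not marker:   # no continuation header means the rename finished
-    #             break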
-
-    def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Deletes the directory.
-
-        :param recursive_directory_delete: If "true", all paths beneath the
-         directory will be deleted. If "false" and the directory is non-empty,
-         an error occurs.
-        :type recursive_directory_delete: bool
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param marker: When deleting a directory, the number of paths that are
-         deleted with each invocation is limited.  If the number of paths to be
-         deleted exceeds this limit, a continuation token is returned in this
-         response header.  When a continuation token is returned in the
-         response, it must be specified in a subsequent invocation of the
-         delete operation to continue deleting the directory.
-        :type marker: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool')
-        if marker is not None:
-            query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}/{path}'}
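-
-    # Editor's sketch: `delete` uses the same continuation pattern as `rename`. With
-    # recursive_directory_delete=True, everything beneath the directory is removed;
-    # `continuation` is the header-reading hook from the rename sketch above.
-    #
-    #     marker = None
-    #     while True:
-    #         marker = directory_ops.delete(True, marker=marker, cls=continuation)
-    #         if not marker:
-    #             break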
-
-    def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a
-        directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param posix_permissions: Optional and only valid if Hierarchical
-         Namespace is enabled for the account. Sets POSIX access permissions
-         for the file owner, the file owning group, and others. Each class may
-         be granted read, write, or execute permission.  The sticky bit is also
-         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
-         0766) are supported.
-        :type posix_permissions: str
-        :param posix_acl: Sets POSIX access control rights on files and
-         directories. The value is a comma-separated list of access control
-         entries. Each access control entry (ACE) consists of a scope, a type,
-         a user or group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type posix_acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if posix_permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
-        if posix_acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
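-
-    # Editor's sketch: the ACL string follows the documented
-    # "[scope:][type]:[id]:[permissions]" format, one comma-separated entry per access
-    # control entry. The object ID below is a placeholder; `directory_ops` is the
-    # hypothetical attached instance used in the sketches above.
-    #
-    #     acl = ('user::rwx,group::r-x,other::---,'
-    #            'user:00000000-0000-0000-0000-000000000000:r-x')
-    #     directory_ops.set_access_control(posix_acl=acl)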
-
-    def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Get the owner, group, permissions, or access control list for a
-        directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the identity values returned in
-         the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`DataLakeStorageErrorException<azure.storage.blob.models.DataLakeStorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "getAccessControl"
-
-        # Construct URL
-        url = self.get_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.DataLakeStorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-            }
-            return cls(response, None, response_headers)
-    get_access_control.metadata = {'url': '/{filesystem}/{path}'}
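-
-    # Editor's sketch: `get_access_control` returns its results only in response
-    # headers, so a `cls` hook is required to read them. With upn=True the identity
-    # headers come back as User Principal Names rather than Azure Active Directory
-    # object IDs, as documented above.
-    #
-    #     def acl_headers(response, deserialized, headers):
-    #         return {k: headers.get(k) for k in
-    #                 ('x-ms-owner', 'x-ms-group', 'x-ms-permissions', 'x-ms-acl')}
-    #
-    #     info = directory_ops.get_access_control(upn=True, cls=acl_headers)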
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_page_blob_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_page_blob_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_page_blob_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_page_blob_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1348 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class PageBlobOperations(object):
-    """PageBlobOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client
-    instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_blob_type = "PageBlob"
-
-    def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Create operation creates a new page blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param blob_content_length: This header specifies the maximum size for
-         the page blob, up to 1 TB. The page blob size must be aligned to a
-         512-byte boundary.
-        :type blob_content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param tier: Optional. Indicates the tier to be set on the page blob.
-         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
-         'P40', 'P50', 'P60', 'P70', 'P80'
-        :type tier: str or
-         ~azure.storage.blob.models.PremiumPageBlobAccessTier
-        :param metadata: Optional. Specifies a user-defined name-value pair
-         associated with the blob. If no name-value pairs are specified, the
-         operation will copy the metadata from the source blob or file to the
-         destination blob. If one or more name-value pairs are specified, the
-         destination blob is created with the specified metadata, and metadata
-         is not copied from the source blob or file. Note that beginning with
-         version 2009-09-19, metadata names must adhere to the naming rules for
-         C# identifiers. See Naming and Referencing Containers, Blobs, and
-         Metadata for more information.
-        :type metadata: str
-        :param blob_sequence_number: Set for page blobs only. The sequence
-         number is a user-controlled value that you can use to track requests.
-         The value of the sequence number must be between 0 and 2^63 - 1.
-        :type blob_sequence_number: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param blob_http_headers: Additional parameters for the operation
-        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        blob_content_type = None
-        if blob_http_headers is not None:
-            blob_content_type = blob_http_headers.blob_content_type
-        blob_content_encoding = None
-        if blob_http_headers is not None:
-            blob_content_encoding = blob_http_headers.blob_content_encoding
-        blob_content_language = None
-        if blob_http_headers is not None:
-            blob_content_language = blob_http_headers.blob_content_language
-        blob_content_md5 = None
-        if blob_http_headers is not None:
-            blob_content_md5 = blob_http_headers.blob_content_md5
-        blob_cache_control = None
-        if blob_http_headers is not None:
-            blob_cache_control = blob_http_headers.blob_cache_control
-        blob_content_disposition = None
-        if blob_http_headers is not None:
-            blob_content_disposition = blob_http_headers.blob_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if tier is not None:
-            header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
-        if blob_sequence_number is not None:
-            header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
-        if blob_content_type is not None:
-            header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
-        if blob_content_encoding is not None:
-            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
-        if blob_content_language is not None:
-            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
-        if blob_content_md5 is not None:
-            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
-        if blob_cache_control is not None:
-            header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
-        if blob_content_disposition is not None:
-            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{containerName}/{blob}'}
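-
-    # Editor's sketch: creating an empty 1 MiB page blob. The Create request carries
-    # no body, so content_length is 0, while blob_content_length (the blob's maximum
-    # size) must be 512-byte aligned, per the docstring. `page_blob_ops` is a
-    # hypothetical attached PageBlobOperations instance.
-    #
-    #     page_blob_ops.create(
-    #         content_length=0,
-    #         blob_content_length=1024 * 1024,   # multiple of 512
-    #         blob_sequence_number=0,
-    #         tier='P10',                        # premium page-blob tier
-    #     )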
-
-    def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Upload Pages operation writes a range of pages to a page blob.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param transactional_content_md5: Specify the transactional md5 for
-         the body, to be validated by the service.
-        :type transactional_content_md5: bytearray
-        :param transactional_content_crc64: Specify the transactional crc64
-         for the body, to be validated by the service.
-        :type transactional_content_crc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Specifies the range of bytes to be written as a page;
-         both the start and end of the range must be specified.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "page"
-        page_write = "update"
-
-        # Construct URL
-        url = self.upload_pages.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if transactional_content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
-        if transactional_content_crc64 is not None:
-            header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_pages.metadata = {'url': '/{containerName}/{blob}'}
-
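A minimal usage sketch for `upload_pages`, again assuming a configured `page_blob` instance (hypothetical name). Page ranges are inclusive and must start and end on 512-byte boundaries, and the body length must match the range length:

```python
data = b'\x00' * 4096                  # length must be a multiple of 512

page_blob.upload_pages(
    body=iter([data]),                 # streamed to the service as the request body
    content_length=len(data),
    range='bytes=0-4095',              # inclusive, 512-byte aligned
)
```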
-    def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Clear Pages operation clears a set of pages from a page blob.
-
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Specifies the range of bytes to be cleared. Both the
-         start and end of the range must be given and 512-byte aligned.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "page"
-        page_write = "clear"
-
-        # Construct URL
-        url = self.clear_pages.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    clear_pages.metadata = {'url': '/{containerName}/{blob}'}
-
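`clear_pages` has the same shape but carries no request body, so the Content-Length is 0 and only the range to zero out is needed. A sketch under the same assumed `page_blob` instance:

```python
# Zero out the first page; Clear Pages sends no body, hence content_length=0.
page_blob.clear_pages(
    content_length=0,
    range='bytes=0-511',
)
```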
-    def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """The Upload Pages operation writes a range of pages to a page blob where
-        the contents are read from a URL.
-
-        :param source_url: Specify a URL to the copy source.
-        :type source_url: str
-        :param source_range: The range of bytes to read from the copy
-         source. The length of this range must match the length of the
-         destination range given in the x-ms-range header.
-        :type source_range: str
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param range: The range of bytes to which the source range would be
-         written. The range should be 512 aligned and range-end is required.
-        :type range: str
-        :param source_content_md5: Specify the md5 calculated for the range of
-         bytes that must be read from the copy source.
-        :type source_content_md5: bytearray
-        :param source_contentcrc64: Specify the crc64 calculated for the range
-         of bytes that must be read from the copy source.
-        :type source_contentcrc64: bytearray
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param sequence_number_access_conditions: Additional parameters for
-         the operation
-        :type sequence_number_access_conditions:
-         ~azure.storage.blob.models.SequenceNumberAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.blob.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_sequence_number_less_than_or_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
-        if_sequence_number_less_than = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
-        if_sequence_number_equal_to = None
-        if sequence_number_access_conditions is not None:
-            if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-
-        comp = "page"
-        page_write = "update"
-
-        # Construct URL
-        url = self.upload_pages_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
-        header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        if source_content_md5 is not None:
-            header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
-        if source_contentcrc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_sequence_number_less_than_or_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
-        if if_sequence_number_less_than is not None:
-            header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
-        if if_sequence_number_equal_to is not None:
-            header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
-                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'}
-
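For `upload_pages_from_url` the service reads the data from `source_url` itself, so the request again carries no body (content_length is 0) and the source and destination ranges must be the same length. A sketch, assuming `page_blob` as before and a `src_url` that the service can read (public or SAS-authorized):

```python
page_blob.upload_pages_from_url(
    source_url=src_url,                # must be readable by the storage service
    source_range='bytes=0-4095',
    content_length=0,                  # no request body; data is read server-side
    range='bytes=0-4095',              # same length as source_range
)
```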
-    def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Get Page Ranges operation returns the list of valid page ranges for
-        a page blob or snapshot of a page blob.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param range: Specifies the range of bytes over which to list page
-         ranges, inclusively.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PageList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.PageList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "pagelist"
-
-        # Construct URL
-        url = self.get_page_ranges.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PageList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}
-
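`get_page_ranges` is the first operation here that deserializes a response body, returning a `PageList` model when no `cls` callback is given. A sketch that walks the valid ranges, assuming `page_blob` as before; the `page_range` attribute name follows the generated `PageList` model:

```python
page_list = page_blob.get_page_ranges(range='bytes=0-1048575')
for pr in page_list.page_range:        # each PageRange has inclusive start/end
    print('valid pages:', pr.start, '-', pr.end)
```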
-    def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Get Page Ranges Diff operation returns the list of valid page
-        ranges for a page blob that were changed between target blob and
-        previous snapshot.
-
-        :param snapshot: The snapshot parameter is an opaque DateTime value
-         that, when present, specifies the blob snapshot to retrieve. For more
-         information on working with blob snapshots, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
-         a Snapshot of a Blob.</a>
-        :type snapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param prevsnapshot: Optional in version 2015-07-08 and newer. The
-         prevsnapshot parameter is a DateTime value that specifies that the
-         response will contain only pages that were changed between target blob
-         and previous snapshot. Changed pages include both updated and cleared
-         pages. The target blob may be a snapshot, as long as the snapshot
-         specified by prevsnapshot is the older of the two. Note that
-         incremental snapshots are currently supported only for blobs created
-         on or after January 1, 2016.
-        :type prevsnapshot: str
-        :param prev_snapshot_url: Optional. This header is only supported in
-         service versions 2019-04-19 and after and specifies the URL of a
-         previous snapshot of the target blob. The response will only contain
-         pages that were changed between the target blob and its previous
-         snapshot.
-        :type prev_snapshot_url: str
-        :param range: Specifies the range of bytes over which to list page
-         ranges, inclusively.
-        :type range: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PageList or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.PageList
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "pagelist"
-
-        # Construct URL
-        url = self.get_page_ranges_diff.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if snapshot is not None:
-            query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if prevsnapshot is not None:
-            query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if prev_snapshot_url is not None:
-            header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PageList', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'}
-
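A sketch of an incremental-backup style use of `get_page_ranges_diff`, assuming `page_blob` and an earlier snapshot timestamp `snap` (hypothetical names): ranges reported in `page_range` were written since the snapshot, while `clear_range` entries were cleared; both attribute names follow the generated `PageList` model:

```python
diff = page_blob.get_page_ranges_diff(prevsnapshot=snap)
for pr in diff.page_range:             # pages updated since the snapshot
    copy_range(pr.start, pr.end)       # hypothetical backup helper
for cr in diff.clear_range:            # pages cleared since the snapshot
    zero_range(cr.start, cr.end)       # hypothetical backup helper
```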
-    def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Resize the Blob.
-
-        :param blob_content_length: This header specifies the maximum size for
-         the page blob, up to 1 TB. The page blob size must be aligned to a
-         512-byte boundary.
-        :type blob_content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param cpk_info: Additional parameters for the operation
-        :type cpk_info: ~azure.storage.blob.models.CpkInfo
-        :param cpk_scope_info: Additional parameters for the operation
-        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        encryption_key = None
-        if cpk_info is not None:
-            encryption_key = cpk_info.encryption_key
-        encryption_key_sha256 = None
-        if cpk_info is not None:
-            encryption_key_sha256 = cpk_info.encryption_key_sha256
-        encryption_algorithm = None
-        if cpk_info is not None:
-            encryption_algorithm = cpk_info.encryption_algorithm
-        encryption_scope = None
-        if cpk_scope_info is not None:
-            encryption_scope = cpk_scope_info.encryption_scope
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.resize.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if encryption_key is not None:
-            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
-        if encryption_key_sha256 is not None:
-            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
-        if encryption_algorithm is not None:
-            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
-        if encryption_scope is not None:
-            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    resize.metadata = {'url': '/{containerName}/{blob}'}
-
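`resize` only changes the blob's capacity, never its contents: growing exposes new zeroed pages, shrinking discards pages past the new end. A sketch, assuming `page_blob`:

```python
new_size = 8 * 1024 * 1024             # capacity must be 512-byte aligned
assert new_size % 512 == 0
page_blob.resize(blob_content_length=new_size)
```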
-    def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Update the sequence number of the blob.
-
-        :param sequence_number_action: Required if the
-         x-ms-blob-sequence-number header is set for the request. This property
-         applies to page blobs only. This property indicates how the service
-         should modify the blob's sequence number. Possible values include:
-         'max', 'update', 'increment'
-        :type sequence_number_action: str or
-         ~azure.storage.blob.models.SequenceNumberActionType
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param blob_sequence_number: Set for page blobs only. The sequence
-         number is a user-controlled value that you can use to track requests.
-         The value of the sequence number must be between 0 and 2^63 - 1.
-        :type blob_sequence_number: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.update_sequence_number.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType')
-        if blob_sequence_number is not None:
-            header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}
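To make the calling convention of the operation above concrete, here is a retrospective sketch. `page_blob_ops` is a hypothetical handle to the generated operations object (normally attached to a client for you), and the models import path follows the file layout visible in this diff; the conditional-access models map directly onto the x-ms-lease-id and If-Match headers the method constructs.

    # Retrospective sketch only; `page_blob_ops` is a hypothetical handle.
    from azure.multiapi.storagev2.blob.v2019_07_07._generated import models

    page_blob_ops.update_sequence_number(
        sequence_number_action="update",   # 'max', 'update', or 'increment'
        blob_sequence_number=42,           # user-controlled, 0 .. 2**63 - 1
        lease_access_conditions=models.LeaseAccessConditions(
            lease_id="00000000-1111-2222-3333-444444444444"),  # -> x-ms-lease-id
        modified_access_conditions=models.ModifiedAccessConditions(
            if_match='"0x8D4BCC2E4835CD0"'),                   # -> If-Match
    )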
-
-    def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
-        """The Copy Incremental operation copies a snapshot of the source page
-        blob to a destination page blob. The snapshot is copied such that only
-        the differential changes between the previously copied snapshot are
-        transferred to the destination. The copied snapshots are complete
-        copies of the original snapshot and can be read or copied from as
-        usual. This API is supported since REST version 2016-05-31.
-
-        :param copy_source: Specifies the name of the source page blob
-         snapshot. This value is a URL of up to 2 KB in length that specifies a
-         page blob snapshot. The value should be URL-encoded as it would appear
-         in a request URI. The source blob must either be public or must be
-         authenticated via a shared access signature.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.blob.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if_unmodified_since = None
-        if_match = None
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-            if_match = modified_access_conditions.if_match
-            if_none_match = modified_access_conditions.if_none_match
-
-        comp = "incrementalcopy"
-
-        # Construct URL
-        url = self.copy_incremental.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    copy_incremental.metadata = {'url': '/{containerName}/{blob}'}
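Similarly, a retrospective sketch of the incremental-copy call above. The snapshot URL is illustrative, and the `cls` callback simply surfaces the x-ms-copy-id and x-ms-copy-status headers that the 202 response carries.

    # Retrospective sketch; the source must be public or carry a SAS token.
    source = ("https://account.blob.core.windows.net/container/disk.vhd"
              "?snapshot=2019-07-07T00%3A00%3A00.0000000Z&<sas-token>")

    copy_headers = page_blob_ops.copy_incremental(
        copy_source=source,
        cls=lambda response, _, headers: headers,  # x-ms-copy-id / x-ms-copy-status
    )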
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_service_operations.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_service_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_service_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/operations/_service_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,566 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ServiceOperations(object):
-    """ServiceOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs):
-        """Sets properties for a storage account's Blob service endpoint,
-        including properties for Storage Analytics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.blob.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
-
-    def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """gets the properties of a storage account's Blob service, including
-        properties for Storage Analytics and CORS (Cross-Origin Resource
-        Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
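The set_properties and get_properties operations above are symmetric (PUT vs GET on restype=service, comp=properties), so a read-modify-write round trip is the natural usage. A hedged sketch, with `service_ops` standing in for the attached ServiceOperations instance and `cors` assumed to be the snake_cased Cors field on StorageServiceProperties:

    # Retrospective sketch of a read-modify-write of Blob service properties.
    props = service_ops.get_properties()   # deserialized StorageServiceProperties
    props.cors = []                        # e.g. clear all CORS rules (assumed field name)
    service_ops.set_properties(props, timeout=30)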
-
-    def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """Retrieves statistics related to replication for the Blob service. It is
-        only available on the secondary location endpoint when read-access
-        geo-redundant replication is enabled for the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceStats or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.StorageServiceStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceStats', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/'}
-
-    def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The List Containers Segment operation returns a list of the containers
-        under the specified account.
-
-        :param prefix: Filters the results to return only containers whose
-         name begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of containers to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all containers remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of containers to
-         return. If the request does not specify maxresults, or specifies a
-         value greater than 5000, the server will return up to 5000 items. Note
-         that if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify that the container's
-         metadata be returned as part of the response body. Possible values
-         include: 'metadata'
-        :type include: str or
-         ~azure.storage.blob.models.ListContainersIncludeType
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListContainersSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_containers_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListContainersSegmentResponse', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_containers_segment.metadata = {'url': '/'}
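The marker/NextMarker contract described in the docstring above is easiest to see as a loop. A minimal sketch; `service_ops` is the same hypothetical handle, and the `container_items`/`next_marker` attribute names are assumptions based on the generator's snake_casing of the response fields:

    # Retrospective sketch of driving the segmented listing by hand.
    marker = None
    containers = []
    while True:
        segment = service_ops.list_containers_segment(
            prefix="logs-",   # only containers whose name starts with this
            marker=marker,    # opaque continuation token from the previous page
            maxresults=100,   # the service may still return fewer results
        )
        containers.extend(segment.container_items)
        marker = segment.next_marker
        if not marker:        # an empty NextMarker means the listing is complete
            break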
-
-    def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs):
-        """Retrieves a user delegation key for the Blob service. This is only a
-        valid operation when using bearer token authentication.
-
-        :param key_info:
-        :type key_info: ~azure.storage.blob.models.KeyInfo
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: UserDelegationKey or the result of cls(response)
-        :rtype: ~azure.storage.blob.models.UserDelegationKey
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "service"
-        comp = "userdelegationkey"
-
-        # Construct URL
-        url = self.get_user_delegation_key.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(key_info, 'KeyInfo')
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('UserDelegationKey', response)
-            header_dict = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_user_delegation_key.metadata = {'url': '/'}
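A retrospective sketch of requesting a delegation key, which only succeeds when the pipeline authenticates with a bearer token; the `start`/`expiry` field names on KeyInfo are assumptions based on the generated model:

    # Retrospective sketch: request a user delegation key valid for one hour.
    from azure.multiapi.storagev2.blob.v2019_07_07._generated import models

    key_info = models.KeyInfo(              # field names assumed
        start="2019-07-07T00:00:00Z",
        expiry="2019-07-07T01:00:00Z",
    )
    delegation_key = service_ops.get_user_delegation_key(key_info)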
-
-    def get_account_info(self, cls=None, **kwargs):
-        """Returns the sku name and account kind .
-
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        restype = "account"
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_account_info.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
-                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_account_info.metadata = {'url': '/'}
-
-    def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Batch operation allows multiple API calls to be embedded into a
-        single HTTP request.
-
-        :param body: Initial data
-        :type body: Generator
-        :param content_length: The length of the request.
-        :type content_length: long
-        :param multipart_content_type: Required. The value of this header must
-         be multipart/mixed with a batch boundary. Example header value:
-         multipart/mixed; boundary=batch_<GUID>
-        :type multipart_content_type: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "batch"
-
-        # Construct URL
-        url = self.submit_batch.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    submit_batch.metadata = {'url': '/'}
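The batch operation's contract is unusual in that the caller supplies the raw multipart body, its length, and the boundary header, and receives a streamed multipart response back. A hedged sketch of just the call shape; building a valid multipart/mixed payload is elided, and `batch_body` is a hypothetical pre-encoded bytes value:

    # Retrospective sketch of the submit_batch calling convention.
    boundary = "batch_00000000-1111-2222-3333-444444444444"
    content = batch_body  # hypothetical: bytes of the encoded sub-requests

    stream = service_ops.submit_batch(
        body=iter([content]),                             # generator over chunks
        content_length=len(content),
        multipart_content_type="multipart/mixed; boundary=" + boundary,
    )
    for chunk in stream:      # assumed iterable of response chunks
        pass                  # parse the multipart response here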
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/version.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/version.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_generated/version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-VERSION = "2019-07-07"
-
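Each supported REST API version lives under its own module path in this package, so version selection is an import-path choice. An illustrative sketch (module path taken from the file names in this diff):

    # Illustrative only: resolve a multi-API blob module by versioned path.
    import importlib

    def load_blob_sdk(api_version):
        # e.g. "v2019_07_07" resolved to the module removed in this diff;
        # after this release that import raises ModuleNotFoundError.
        return importlib.import_module(
            "azure.multiapi.storagev2.blob." + api_version)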
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_lease.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_lease.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_lease.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_lease.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,311 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import uuid
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TypeVar, TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator import distributed_trace
-
-from ._shared.response_handlers import return_response_headers, process_storage_error
-from ._generated.models import StorageErrorException, LeaseAccessConditions
-from ._serialize import get_modify_conditions
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from ._generated.operations import BlobOperations, ContainerOperations
-    BlobClient = TypeVar("BlobClient")
-    ContainerClient = TypeVar("ContainerClient")
-
-
-def get_access_conditions(lease):
-    # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
-    try:
-        lease_id = lease.id # type: ignore
-    except AttributeError:
-        lease_id = lease # type: ignore
-    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
-
-
-class BlobLeaseClient(object):
-    """Creates a new BlobLeaseClient.
-
-    This client provides lease operations on a BlobClient or ContainerClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the blob or container to lease.
-    :type client: ~azure.storage.blob.BlobClient or
-        ~azure.storage.blob.ContainerClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-    def __init__(
-            self, client, lease_id=None
-    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
-        # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None
-        self.id = lease_id or str(uuid.uuid4())
-        self.last_modified = None
-        self.etag = None
-        if hasattr(client, 'blob_name'):
-            self._client = client._client.blob  # type: ignore # pylint: disable=protected-access
-        elif hasattr(client, 'container_name'):
-            self._client = client._client.container  # type: ignore # pylint: disable=protected-access
-        else:
-            raise TypeError("Lease must use either BlobClient or ContainerClient.")
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        self.release()
-
-    @distributed_trace
-    def acquire(self, lease_duration=-1, **kwargs):
-        # type: (int, **Any) -> None
-        """Requests a new lease.
-
-        If the container does not have an active lease, the Blob service creates a
-        lease on the container and returns a new lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = self._client.acquire_lease(
-                timeout=kwargs.pop('timeout', None),
-                duration=lease_duration,
-                proposed_lease_id=self.id,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-        self.etag = kwargs.get('etag')  # type: str
-
-    @distributed_trace
-    def renew(self, **kwargs):
-        # type: (Any) -> None
-        """Renews the lease.
-
-        The lease can be renewed if the lease ID specified in the
-        lease client matches that associated with the container or blob. Note that
-        the lease may be renewed even if it has expired as long as the container
-        or blob has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = self._client.renew_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace
-    def release(self, **kwargs):
-        # type: (Any) -> None
-        """Release the lease.
-
-        The lease may be released if the client lease id specified matches
-        that associated with the container or blob. Releasing the lease allows another client
-        to immediately acquire the lease for the container or blob as soon as the release is complete.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = self._client.release_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace
-    def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """Change the lease ID of an active lease.
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = self._client.change_lease(
-                lease_id=self.id,
-                proposed_lease_id=proposed_lease_id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace
-    def break_lease(self, lease_break_period=None, **kwargs):
-        # type: (Optional[int], Any) -> int
-        """Break the lease, if the container or blob has an active lease.
-
-        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container or blob.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :param int lease_break_period:
-            This is the proposed duration of seconds that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = self._client.break_lease(
-                timeout=kwargs.pop('timeout', None),
-                break_period=lease_break_period,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response.get('lease_time') # type: ignore
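Because the removed BlobLeaseClient implements __enter__/__exit__ around release(), it could bracket a critical section. A retrospective sketch, assuming `blob_client` is an already-constructed BlobClient from this same versioned package:

    # Retrospective sketch of the lease lifecycle the removed class exposed.
    from azure.multiapi.storagev2.blob.v2019_07_07._lease import BlobLeaseClient

    lease = BlobLeaseClient(blob_client)   # generates a lease ID (uuid4) up front
    lease.acquire(lease_duration=15)       # finite lease: 15-60 seconds, -1 = infinite
    try:
        pass  # perform writes guarded by lease.id here
    finally:
        lease.release()                    # another client may acquire immediately

    # Or rely on the context manager, which releases on exit:
    with BlobLeaseClient(blob_client) as lease:
        lease.acquire()                    # default lease_duration=-1 (infinite)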
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_models.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1094 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-
-from enum import Enum
-
-from azure.core.paging import PageIterator, ItemPaged
-
-from ._shared import decode_base64_to_text
-from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
-from ._shared.models import DictMixin, get_enum_value
-from ._generated.models import Logging as GeneratedLogging
-from ._generated.models import Metrics as GeneratedMetrics
-from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
-from ._generated.models import StaticWebsite as GeneratedStaticWebsite
-from ._generated.models import CorsRule as GeneratedCorsRule
-from ._generated.models import AccessPolicy as GenAccessPolicy
-from ._generated.models import StorageErrorException
-from ._generated.models import BlobPrefix as GenBlobPrefix
-from ._generated.models import BlobItem
-
-
-class BlobType(str, Enum):
-
-    BlockBlob = "BlockBlob"
-    PageBlob = "PageBlob"
-    AppendBlob = "AppendBlob"
-
-
-class BlockState(str, Enum):
-    """Block blob block types."""
-
-    Committed = 'Committed'  #: Committed blocks.
-    Latest = 'Latest'  #: Latest blocks.
-    Uncommitted = 'Uncommitted'  #: Uncommitted blocks.
-
-
-class StandardBlobTier(str, Enum):
-    """
-    Specifies the blob tier to set the blob to. This is only applicable for
-    block blobs on standard storage accounts.
-    """
-
-    Archive = 'Archive'  #: Archive
-    Cool = 'Cool'  #: Cool
-    Hot = 'Hot'  #: Hot
-
-
-class PremiumPageBlobTier(str, Enum):
-    """
-    Specifies the page blob tier to set the blob to. This is only applicable to page
-    blobs on premium storage accounts. Please take a look at:
-    https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
-    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
-    """
-
-    P4 = 'P4'  #: P4 Tier
-    P6 = 'P6'  #: P6 Tier
-    P10 = 'P10'  #: P10 Tier
-    P20 = 'P20'  #: P20 Tier
-    P30 = 'P30'  #: P30 Tier
-    P40 = 'P40'  #: P40 Tier
-    P50 = 'P50'  #: P50 Tier
-    P60 = 'P60'  #: P60 Tier
-
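
A hedged sketch of how these tier enums were typically applied; the client setup and resource names are illustrative assumptions:

    # Sketch only: move a block blob to the Cool tier.
    from azure.storage.blob import BlobServiceClient, StandardBlobTier

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "archive-me.bin")
    blob.set_standard_blob_tier(StandardBlobTier.Cool)
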
-
-class SequenceNumberAction(str, Enum):
-    """Sequence number actions."""
-
-    Increment = 'increment'
-    """
-    Increments the value of the sequence number by 1. If specifying this option,
-    do not include the x-ms-blob-sequence-number header.
-    """
-
-    Max = 'max'
-    """
-    Sets the sequence number to be the higher of the value included with the
-    request and the value currently stored for the blob.
-    """
-
-    Update = 'update'
-    """Sets the sequence number to the value included with the request."""
-
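
A brief sketch of the Increment action in use, per the note above that no explicit sequence number accompanies it; client setup and names are illustrative:

    # Sketch only: bump a page blob's sequence number without sending
    # x-ms-blob-sequence-number, matching the Increment semantics above.
    from azure.storage.blob import BlobServiceClient, SequenceNumberAction

    service = BlobServiceClient.from_connection_string("<connection-string>")
    page_blob = service.get_blob_client("mycontainer", "disk.vhd")
    page_blob.set_sequence_number(SequenceNumberAction.Increment)
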
-
-class PublicAccess(str, Enum):
-    """
-    Specifies whether data in the container may be accessed publicly and the level of access.
-    """
-
-    OFF = 'off'
-    """
-    Specifies that there is no public read access for both the container and blobs within the container.
-    Clients cannot enumerate the containers within the storage account as well as the blobs within the container.
-    """
-
-    Blob = 'blob'
-    """
-    Specifies public read access for blobs. Blob data within this container can be read
-    via anonymous request, but container data is not available. Clients cannot enumerate
-    blobs within the container via anonymous request.
-    """
-
-    Container = 'container'
-    """
-    Specifies full public read access for container and blob data. Clients can enumerate
-    blobs within the container via anonymous request, but cannot enumerate containers
-    within the storage account.
-    """
-
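
A sketch of setting the access level at container creation; the connection string and container name are placeholders:

    # Sketch only: create a container whose blobs are anonymously readable
    # but not enumerable, matching PublicAccess.Blob above.
    from azure.storage.blob import BlobServiceClient, PublicAccess

    service = BlobServiceClient.from_connection_string("<connection-string>")
    service.create_container("public-assets", public_access=PublicAccess.Blob)
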
-
-class BlobAnalyticsLogging(GeneratedLogging):
-    """Azure Analytics Logging settings.
-
-    :keyword str version:
-        The version of Storage Analytics to configure. The default value is 1.0.
-    :keyword bool delete:
-        Indicates whether all delete requests should be logged. The default value is `False`.
-    :keyword bool read:
-        Indicates whether all read requests should be logged. The default value is `False`.
-    :keyword bool write:
-        Indicates whether all write requests should be logged. The default value is `False`.
-    :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
-        Determines how long the associated data should persist. If not specified the retention
-        policy will be disabled by default.
-    """
-
-    def __init__(self, **kwargs):
-        self.version = kwargs.get('version', u'1.0')
-        self.delete = kwargs.get('delete', False)
-        self.read = kwargs.get('read', False)
-        self.write = kwargs.get('write', False)
-        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            version=generated.version,
-            delete=generated.delete,
-            read=generated.read,
-            write=generated.write,
-            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
-        )
-
-
-class Metrics(GeneratedMetrics):
-    """A summary of request statistics grouped by API in hour or minute aggregates
-    for blobs.
-
-    :keyword str version:
-        The version of Storage Analytics to configure. The default value is 1.0.
-    :keyword bool enabled:
-        Indicates whether metrics are enabled for the Blob service.
-        The default value is `False`.
-    :keyword bool include_apis:
-        Indicates whether metrics should generate summary statistics for called API operations.
-    :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
-        Determines how long the associated data should persist. If not specified the retention
-        policy will be disabled by default.
-    """
-
-    def __init__(self, **kwargs):
-        self.version = kwargs.get('version', u'1.0')
-        self.enabled = kwargs.get('enabled', False)
-        self.include_apis = kwargs.get('include_apis')
-        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            version=generated.version,
-            enabled=generated.enabled,
-            include_apis=generated.include_apis,
-            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
-        )
-
-
-class RetentionPolicy(GeneratedRetentionPolicy):
-    """The retention policy which determines how long the associated data should
-    persist.
-
-    :param bool enabled:
-        Indicates whether a retention policy is enabled for the storage service.
-        The default value is False.
-    :param int days:
-        Indicates the number of days that metrics, logging, or
-        soft-deleted data should be retained. All data older than this value will
-        be deleted. If enabled=True, the number of days must be specified.
-    """
-
-    def __init__(self, enabled=False, days=None):
-        self.enabled = enabled
-        self.days = days
-        if self.enabled and (self.days is None):
-            raise ValueError("If policy is enabled, 'days' must be specified.")
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            enabled=generated.enabled,
-            days=generated.days,
-        )
-
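
A sketch combining the three settings classes above through service properties; the retention window and connection string are illustrative assumptions:

    # Sketch only: enable analytics logging and hourly metrics with a
    # shared 7-day retention policy.
    from azure.storage.blob import (
        BlobAnalyticsLogging, BlobServiceClient, Metrics, RetentionPolicy)

    service = BlobServiceClient.from_connection_string("<connection-string>")
    retention = RetentionPolicy(enabled=True, days=7)
    service.set_service_properties(
        analytics_logging=BlobAnalyticsLogging(
            read=True, write=True, delete=True, retention_policy=retention),
        hour_metrics=Metrics(
            enabled=True, include_apis=True, retention_policy=retention))
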
-
-class StaticWebsite(GeneratedStaticWebsite):
-    """The properties that enable an account to host a static website.
-
-    :keyword bool enabled:
-        Indicates whether this account is hosting a static website.
-        The default value is `False`.
-    :keyword str index_document:
-        The default name of the index page under each directory.
-    :keyword str error_document404_path:
-        The absolute path of the custom 404 page.
-    """
-
-    def __init__(self, **kwargs):
-        self.enabled = kwargs.get('enabled', False)
-        if self.enabled:
-            self.index_document = kwargs.get('index_document')
-            self.error_document404_path = kwargs.get('error_document404_path')
-        else:
-            self.index_document = None
-            self.error_document404_path = None
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            enabled=generated.enabled,
-            index_document=generated.index_document,
-            error_document404_path=generated.error_document404_path,
-        )
-
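
A sketch of enabling static-website hosting with this class; the document names are placeholders:

    # Sketch only: turn on static-website hosting via service properties.
    from azure.storage.blob import BlobServiceClient, StaticWebsite

    service = BlobServiceClient.from_connection_string("<connection-string>")
    service.set_service_properties(static_website=StaticWebsite(
        enabled=True,
        index_document="index.html",
        error_document404_path="404.html"))
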
-
-class CorsRule(GeneratedCorsRule):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    :param list(str) allowed_origins:
-        A list of origin domains that will be allowed via CORS, or "*" to allow
-        all domains. The list must contain at least one entry. Limited to 64
-        origin domains. Each allowed origin can have up to 256 characters.
-    :param list(str) allowed_methods:
-        A list of HTTP methods that are allowed to be executed by the origin.
-        The list must contain at least one entry. For Azure Storage,
-        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-    :keyword list(str) allowed_headers:
-        Defaults to an empty list. A list of headers allowed to be part of
-        the cross-origin request. Limited to 64 defined headers and two prefixed
-        headers. Each header can be up to 256 characters.
-    :keyword list(str) exposed_headers:
-        Defaults to an empty list. A list of response headers to expose to CORS
-        clients. Limited to 64 defined headers and two prefixed headers. Each
-        header can be up to 256 characters.
-    :keyword int max_age_in_seconds:
-        The number of seconds that the client/browser should cache a
-        preflight response.
-    """
-
-    def __init__(self, allowed_origins, allowed_methods, **kwargs):
-        self.allowed_origins = ','.join(allowed_origins)
-        self.allowed_methods = ','.join(allowed_methods)
-        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
-        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
-
-    @classmethod
-    def _from_generated(cls, generated):
-        return cls(
-            [generated.allowed_origins],
-            [generated.allowed_methods],
-            allowed_headers=[generated.allowed_headers],
-            exposed_headers=[generated.exposed_headers],
-            max_age_in_seconds=generated.max_age_in_seconds,
-        )
-
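
A sketch of constructing and applying a rule; the origin, header lists, and cache age are illustrative assumptions:

    # Sketch only: allow browser GET/PUT from a single origin, with a
    # one-hour preflight cache.
    from azure.storage.blob import BlobServiceClient, CorsRule

    service = BlobServiceClient.from_connection_string("<connection-string>")
    rule = CorsRule(
        allowed_origins=["https://contoso.example"],
        allowed_methods=["GET", "PUT"],
        allowed_headers=["x-ms-blob-type", "x-ms-meta-*"],
        exposed_headers=["x-ms-request-id"],
        max_age_in_seconds=3600)
    service.set_service_properties(cors=[rule])
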
-
-class ContainerProperties(DictMixin):
-    """Blob container's properties class.
-
-    Returned ``ContainerProperties`` instances expose these values through a
-    dictionary interface, for example: ``container_props["last_modified"]``.
-    Additionally, the container name is available as ``container_props["name"]``.
-
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the container was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar ~azure.storage.blob.LeaseProperties lease:
-        Stores all the lease information for the container.
-    :ivar str public_access: Specifies whether data in the container may be accessed
-        publicly and the level of access.
-    :ivar bool has_immutability_policy:
-        Represents whether the container has an immutability policy.
-    :ivar bool has_legal_hold:
-        Represents whether the container has a legal hold.
-    :ivar dict metadata: A dict with name-value pairs to associate with the
-        container as metadata.
-    :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope:
-        The default encryption scope configuration for the container.
-    """
-
-    def __init__(self, **kwargs):
-        self.name = None
-        self.last_modified = kwargs.get('Last-Modified')
-        self.etag = kwargs.get('ETag')
-        self.lease = LeaseProperties(**kwargs)
-        self.public_access = kwargs.get('x-ms-blob-public-access')
-        self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy')
-        self.has_legal_hold = kwargs.get('x-ms-has-legal-hold')
-        self.metadata = kwargs.get('metadata')
-        self.encryption_scope = None
-        default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
-        if default_encryption_scope:
-            self.encryption_scope = ContainerEncryptionScope(
-                default_encryption_scope=default_encryption_scope,
-                prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
-            )
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.last_modified = generated.properties.last_modified
-        props.etag = generated.properties.etag
-        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
-        props.public_access = generated.properties.public_access
-        props.has_immutability_policy = generated.properties.has_immutability_policy
-        props.has_legal_hold = generated.properties.has_legal_hold
-        props.metadata = generated.metadata
-        props.encryption_scope = ContainerEncryptionScope._from_generated(generated)  #pylint: disable=protected-access
-        return props
-
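
A sketch of the dictionary-style access the docstring above describes; client setup and names are placeholders:

    # Sketch only: attribute and key access are interchangeable on the
    # DictMixin-based properties classes.
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    container = service.get_container_client("mycontainer")
    props = container.get_container_properties()
    print(props["name"], props["last_modified"], props.etag)
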
-
-class ContainerPropertiesPaged(PageIterator):
-    """An Iterable of Container properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A container name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.ContainerProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only containers whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of container names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(ContainerPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [self._build_item(item) for item in self._response.container_items]
-
-        return self._response.next_marker or None, self.current_page
-
-    @staticmethod
-    def _build_item(item):
-        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
-
-
-class BlobProperties(DictMixin):
-    """
-    Blob Properties.
-
-    :ivar str name:
-        The name of the blob.
-    :ivar str container:
-        The container in which the blob resides.
-    :ivar str snapshot:
-        Datetime value that uniquely identifies the blob snapshot.
-    :ivar ~azure.storage.blob.BlobType blob_type:
-        String indicating this blob's type.
-    :ivar dict metadata:
-        Name-value pairs associated with the blob as metadata.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the blob was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int size:
-        The size of the content returned. If the entire blob was requested,
-        the length of the blob in bytes. If a subset of the blob was requested, the
-        length of the returned subset.
-    :ivar str content_range:
-        Indicates the range of bytes returned in the event that the client
-        requested a subset of the blob.
-    :ivar int append_blob_committed_block_count:
-        (For Append Blobs) Number of committed blocks in the blob.
-    :ivar int page_blob_sequence_number:
-        (For Page Blobs) Sequence number for page blob used for coordinating
-        concurrent writes.
-    :ivar bool server_encrypted:
-        Set to true if the blob is encrypted on the server.
-    :ivar ~azure.storage.blob.CopyProperties copy:
-        Stores all the copy properties for the blob.
-    :ivar ~azure.storage.blob.ContentSettings content_settings:
-        Stores all the content settings for the blob.
-    :ivar ~azure.storage.blob.LeaseProperties lease:
-        Stores all the lease information for the blob.
-    :ivar ~azure.storage.blob.StandardBlobTier blob_tier:
-        Indicates the access tier of the blob. The hot tier is optimized
-        for storing data that is accessed frequently. The cool storage tier
-        is optimized for storing data that is infrequently accessed and stored
-        for at least a month. The archive tier is optimized for storing
-        data that is rarely accessed and stored for at least six months
-        with flexible latency requirements.
-    :ivar ~datetime.datetime blob_tier_change_time:
-        Indicates when the access tier was last changed.
-    :ivar bool blob_tier_inferred:
-        Indicates whether the access tier was inferred by the service.
-        If false, it indicates that the tier was set explicitly.
-    :ivar bool deleted:
-        Whether this blob was deleted.
-    :ivar ~datetime.datetime deleted_time:
-        A datetime object representing the time at which the blob was deleted.
-    :ivar int remaining_retention_days:
-        The number of days that the blob will be retained before being permanently deleted by the service.
-    :ivar ~datetime.datetime creation_time:
-        Indicates when the blob was created, in UTC.
-    :ivar str archive_status:
-        Archive status of blob.
-    :ivar str encryption_key_sha256:
-        The SHA-256 hash of the provided encryption key.
-    :ivar str encryption_scope:
-        A predefined encryption scope used to encrypt the data on the service. An encryption
-        scope can be created using the Management API and referenced here by name. If a default
-        encryption scope has been defined at the container, this value will override it if the
-        container-level scope is configured to allow overrides. Otherwise an error will be raised.
-    :ivar bool request_server_encrypted:
-        Whether this blob is encrypted.
-    """
-
-    def __init__(self, **kwargs):
-        self.name = kwargs.get('name')
-        self.container = None
-        self.snapshot = kwargs.get('x-ms-snapshot')
-        self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None
-        self.metadata = kwargs.get('metadata')
-        self.encrypted_metadata = kwargs.get('encrypted_metadata')
-        self.last_modified = kwargs.get('Last-Modified')
-        self.etag = kwargs.get('ETag')
-        self.size = kwargs.get('Content-Length')
-        self.content_range = kwargs.get('Content-Range')
-        self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count')
-        self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number')
-        self.server_encrypted = kwargs.get('x-ms-server-encrypted')
-        self.copy = CopyProperties(**kwargs)
-        self.content_settings = ContentSettings(**kwargs)
-        self.lease = LeaseProperties(**kwargs)
-        self.blob_tier = kwargs.get('x-ms-access-tier')
-        self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time')
-        self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred')
-        self.deleted = False
-        self.deleted_time = None
-        self.remaining_retention_days = None
-        self.creation_time = kwargs.get('x-ms-creation-time')
-        self.archive_status = kwargs.get('x-ms-archive-status')
-        self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256')
-        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
-        self.request_server_encrypted = kwargs.get('x-ms-server-encrypted')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        blob = BlobProperties()
-        blob.name = generated.name
-        blob_type = get_enum_value(generated.properties.blob_type)
-        blob.blob_type = BlobType(blob_type) if blob_type else None
-        blob.etag = generated.properties.etag
-        blob.deleted = generated.deleted
-        blob.snapshot = generated.snapshot
-        blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
-        blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
-        blob.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
-        blob.copy = CopyProperties._from_generated(generated)  # pylint: disable=protected-access
-        blob.last_modified = generated.properties.last_modified
-        blob.creation_time = generated.properties.creation_time
-        blob.content_settings = ContentSettings._from_generated(generated)  # pylint: disable=protected-access
-        blob.size = generated.properties.content_length
-        blob.page_blob_sequence_number = generated.properties.blob_sequence_number
-        blob.server_encrypted = generated.properties.server_encrypted
-        blob.encryption_scope = generated.properties.encryption_scope
-        blob.deleted_time = generated.properties.deleted_time
-        blob.remaining_retention_days = generated.properties.remaining_retention_days
-        blob.blob_tier = generated.properties.access_tier
-        blob.blob_tier_inferred = generated.properties.access_tier_inferred
-        blob.archive_status = generated.properties.archive_status
-        blob.blob_tier_change_time = generated.properties.access_tier_change_time
-        return blob
-
-
-class BlobPropertiesPaged(PageIterator):
-    """An Iterable of Blob properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A blob name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.BlobProperties)
-    :ivar str container: The container that the blobs are listed from.
-    :ivar str delimiter: A delimiting character used for hierarchy listing.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str container: The name of the container.
-    :param str prefix: Filters the results to return only blobs whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of blobs to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    :param str delimiter:
-        Used to capture blobs whose names begin with the same substring up to
-        the appearance of the delimiter character. The delimiter may be a single
-        character or a string.
-    :param location_mode: Specifies the location the request should be sent to.
-        This mode only applies for RA-GRS accounts which allow secondary read access.
-        Options include 'primary' or 'secondary'.
-    """
-    def __init__(
-            self, command,
-            container=None,
-            prefix=None,
-            results_per_page=None,
-            continuation_token=None,
-            delimiter=None,
-            location_mode=None):
-        super(BlobPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.container = container
-        self.delimiter = delimiter
-        self.current_page = None
-        self.location_mode = location_mode
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                prefix=self.prefix,
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.container = self._response.container_name
-        self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
-
-        return self._response.next_marker or None, self.current_page
-
-    def _build_item(self, item):
-        if isinstance(item, BlobProperties):
-            return item
-        if isinstance(item, BlobItem):
-            blob = BlobProperties._from_generated(item)  # pylint: disable=protected-access
-            blob.container = self.container
-            return blob
-        return item
-
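
A sketch of consuming this pager one page at a time with a resumable continuation token; names are illustrative assumptions:

    # Sketch only: page through blobs and capture the continuation token,
    # mirroring the PageIterator behaviour implemented above.
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    container = service.get_container_client("mycontainer")
    pager = container.list_blobs(results_per_page=100).by_page()
    for blob in next(pager):
        print(blob.name, blob.size)
    token = pager.continuation_token  # pass to by_page(token) to resume
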
-
-class BlobPrefix(ItemPaged, DictMixin):
-    """An Iterable of Blob properties.
-
-    Returned from walk_blobs when a delimiter is used.
-    Can be thought of as a virtual blob directory.
-
-    :ivar str name: The prefix, or "directory name", of the blob.
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A blob name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str next_marker: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.BlobProperties)
-    :ivar str container: The container that the blobs are listed from.
-    :ivar str delimiter: A delimiting character used for hierarchy listing.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only blobs whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of blobs to retrieve per
-        call.
-    :param str marker: An opaque continuation token.
-    :param str delimiter:
-        Used to capture blobs whose names begin with the same substring up to
-        the appearance of the delimiter character. The delimiter may be a single
-        character or a string.
-    :param location_mode: Specifies the location the request should be sent to.
-        This mode only applies for RA-GRS accounts which allow secondary read access.
-        Options include 'primary' or 'secondary'.
-    """
-    def __init__(self, *args, **kwargs):
-        super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
-        self.name = kwargs.get('prefix')
-        self.prefix = kwargs.get('prefix')
-        self.results_per_page = kwargs.get('results_per_page')
-        self.container = kwargs.get('container')
-        self.delimiter = kwargs.get('delimiter')
-        self.location_mode = kwargs.get('location_mode')
-
-
-class BlobPrefixPaged(BlobPropertiesPaged):
-    def __init__(self, *args, **kwargs):
-        super(BlobPrefixPaged, self).__init__(*args, **kwargs)
-        self.name = self.prefix
-
-    def _extract_data_cb(self, get_next_return):
-        continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
-        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
-        self.current_page = [self._build_item(item) for item in self.current_page]
-        self.delimiter = self._response.delimiter
-
-        return continuation_token, self.current_page
-
-    def _build_item(self, item):
-        item = super(BlobPrefixPaged, self)._build_item(item)
-        if isinstance(item, GenBlobPrefix):
-            return BlobPrefix(
-                self._command,
-                container=self.container,
-                prefix=item.name,
-                results_per_page=self.results_per_page,
-                location_mode=self.location_mode)
-        return item
-
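
A sketch of the hierarchical listing that produces these BlobPrefix entries; the prefix and delimiter are illustrative:

    # Sketch only: walk_blobs yields a BlobPrefix per "virtual directory"
    # when a delimiter is used.
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    container = service.get_container_client("mycontainer")
    for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
        if hasattr(item, "prefix"):   # a BlobPrefix ("directory")
            print("dir: ", item.prefix)
        else:                         # a plain BlobProperties entry
            print("blob:", item.name)
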
-
-class LeaseProperties(DictMixin):
-    """Blob Lease Properties.
-
-    :ivar str status:
-        The lease status of the blob. Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the blob. Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a blob is leased, specifies whether the lease is of infinite or fixed duration.
-    """
-
-    def __init__(self, **kwargs):
-        self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
-        self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
-        self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
-
-    @classmethod
-    def _from_generated(cls, generated):
-        lease = cls()
-        lease.status = get_enum_value(generated.properties.lease_status)
-        lease.state = get_enum_value(generated.properties.lease_state)
-        lease.duration = get_enum_value(generated.properties.lease_duration)
-        return lease
-
-
-class ContentSettings(DictMixin):
-    """The content settings of a blob.
-
-    :param str content_type:
-        The content type specified for the blob. If no content type was
-        specified, the default content type is application/octet-stream.
-    :param str content_encoding:
-        If the content_encoding has previously been set
-        for the blob, that value is stored.
-    :param str content_language:
-        If the content_language has previously been set
-        for the blob, that value is stored.
-    :param str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and can also be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the blob, that value is stored.
-    :param str cache_control:
-        If the cache_control has previously been set for
-        the blob, that value is stored.
-    :param str content_md5:
-        If the content_md5 has been set for the blob, this response
-        header is stored so that the client can check for message content
-        integrity.
-    """
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None, **kwargs):
-
-        self.content_type = content_type or kwargs.get('Content-Type')
-        self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
-        self.content_language = content_language or kwargs.get('Content-Language')
-        self.content_md5 = content_md5 or kwargs.get('Content-MD5')
-        self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
-        self.cache_control = cache_control or kwargs.get('Cache-Control')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        settings = cls()
-        settings.content_type = generated.properties.content_type or None
-        settings.content_encoding = generated.properties.content_encoding or None
-        settings.content_language = generated.properties.content_language or None
-        settings.content_md5 = generated.properties.content_md5 or None
-        settings.content_disposition = generated.properties.content_disposition or None
-        settings.cache_control = generated.properties.cache_control or None
-        return settings
-
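
A sketch of attaching these settings at upload time; the payload and names are illustrative assumptions:

    # Sketch only: set content type and caching headers on upload.
    from azure.storage.blob import BlobServiceClient, ContentSettings

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "index.html")
    blob.upload_blob(
        b"<html>...</html>",
        overwrite=True,
        content_settings=ContentSettings(
            content_type="text/html", cache_control="max-age=3600"))
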
-
-class CopyProperties(DictMixin):
-    """Blob Copy Properties.
-
-    These properties will be `None` if this blob has never been the destination
-    in a Copy Blob operation, or if this blob has been modified after a concluded
-    Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List.
-
-    :ivar str id:
-        String identifier for the last attempted Copy Blob operation where this blob
-        was the destination blob.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source blob used in the last attempted
-        Copy Blob operation where this blob was the destination blob.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy Blob.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy Blob operation where this blob was the destination blob. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar ~datetime.datetime completion_time:
-        Conclusion time of the last attempted Copy Blob operation where this blob was the
-        destination blob. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure.
-    :ivar bool incremental_copy:
-        Copies the snapshot of the source page blob to a destination page blob.
-        The snapshot is copied such that only the differential changes between
-        the previously copied snapshot and the current blob are transferred to
-        the destination.
-    :ivar ~datetime.datetime destination_snapshot:
-        Included if the blob is incremental copy blob or incremental copy snapshot,
-        if x-ms-copy-status is success. Snapshot time of the last successful
-        incremental copy snapshot for this blob.
-    """
-
-    def __init__(self, **kwargs):
-        self.id = kwargs.get('x-ms-copy-id')
-        self.source = kwargs.get('x-ms-copy-source')
-        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
-        self.progress = kwargs.get('x-ms-copy-progress')
-        self.completion_time = kwargs.get('x-ms-copy-completion-time')
-        self.status_description = kwargs.get('x-ms-copy-status-description')
-        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
-        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        copy = cls()
-        copy.id = generated.properties.copy_id or None
-        copy.status = get_enum_value(generated.properties.copy_status) or None
-        copy.source = generated.properties.copy_source or None
-        copy.progress = generated.properties.copy_progress or None
-        copy.completion_time = generated.properties.copy_completion_time or None
-        copy.status_description = generated.properties.copy_status_description or None
-        copy.incremental_copy = generated.properties.incremental_copy or None
-        copy.destination_snapshot = generated.properties.destination_snapshot or None
-        return copy
-
-
-class BlobBlock(DictMixin):
-    """BlockBlob Block class.
-
-    :param str block_id:
-        Block id.
-    :param str state:
-        Block state. Possible values: committed|uncommitted
-    :ivar int size:
-        Block size in bytes.
-    """
-
-    def __init__(self, block_id, state=BlockState.Latest):
-        self.id = block_id
-        self.state = state
-        self.size = None
-
-    @classmethod
-    def _from_generated(cls, generated):
-        block = cls(decode_base64_to_text(generated.name))
-        block.size = generated.size
-        return block
-
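
A sketch of the staging/commit flow that produces these blocks; the block ids and payloads are illustrative:

    # Sketch only: stage two blocks, then commit them in order. This is
    # where the BlobBlock ids above originate.
    from azure.storage.blob import BlobBlock, BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "assembled.txt")
    blob.stage_block("block-1", b"hello ")
    blob.stage_block("block-2", b"world")
    blob.commit_block_list([BlobBlock("block-1"), BlobBlock("block-2")])
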
-
-class PageRange(DictMixin):
-    """Page Range for page blob.
-
-    :param int start:
-        Start of page range in bytes.
-    :param int end:
-        End of page range in bytes.
-    """
-
-    def __init__(self, start=None, end=None):
-        self.start = start
-        self.end = end
-
-
-class AccessPolicy(GenAccessPolicy):
-    """Access Policy class used by the set and get access policy methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and
-    permissions for the Shared Access Signatures with which it's associated.
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit
-    them from the URL for the Shared Access Signature. Doing so permits you to
-    modify the associated signature's behavior at any time, as well as to revoke
-    it. Or you can specify one or more of the access policy parameters within
-    the stored access policy, and the others on the URL. Finally, you can
-    specify all of the parameters on the URL. In this case, you can use the
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must
-    include all fields required to authenticate the signature. If any required
-    fields are missing, the request will fail. Likewise, if a field is specified
-    both in the Shared Access Signature URL and in the stored access policy, the
-    request will fail with status code 400 (Bad Request).
-
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.ContainerSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    """
-    def __init__(self, permission=None, expiry=None, start=None):
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
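
A sketch of storing a named policy so that associated SAS tokens can later be revoked, as the docstring above describes; the identifier and time window are illustrative:

    # Sketch only: register a stored access policy on a container.
    from datetime import datetime, timedelta
    from azure.storage.blob import (
        AccessPolicy, BlobServiceClient, ContainerSasPermissions)

    service = BlobServiceClient.from_connection_string("<connection-string>")
    container = service.get_container_client("mycontainer")
    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(days=1))
    container.set_container_access_policy(
        signed_identifiers={"read-only": policy})
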
-
-class ContainerSasPermissions(object):
-    """ContainerSasPermissions class to be used with the
-    :func:`~azure.storage.blob.generate_container_sas` function and
-    for the AccessPolicies used with
-    :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
-
-    :param bool read:
-        Read the content, properties, metadata or block list of any blob in the
-        container. Use any blob in the container as the source of a copy operation.
-    :param bool write:
-        For any blob in the container, create or write content, properties,
-        metadata, or block list. Snapshot or lease the blob. Resize the blob
-        (page blob only). Use the blob as the destination of a copy operation
-        within the same account. Note: You cannot grant permissions to read or
-        write container properties or metadata, nor to lease a container, with
-        a container SAS. Use an account SAS instead.
-    :param bool delete:
-        Delete any blob in the container. Note: You cannot grant permissions to
-        delete a container with a container SAS. Use an account SAS instead.
-    :param bool list:
-        List blobs in the container.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False):  # pylint: disable=redefined-builtin
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a ContainerSasPermissions from a string.
-
-        To specify read, write, delete, or list permissions, include only the
-        first letter of each word in the string. For example, to grant read and
-        write permissions, provide the string "rw".
-
-        :param str permission: The string which dictates the read, write, delete,
-            and list permissions.
-        :return: A ContainerSasPermissions object
-        :rtype: ~azure.storage.blob.ContainerSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-        parsed = cls(p_read, p_write, p_delete, p_list)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
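
A sketch of the generate_container_sas helper named in the docstring above; the account name and key are placeholders:

    # Sketch only: issue a one-hour read/list container SAS.
    from datetime import datetime, timedelta
    from azure.storage.blob import (
        ContainerSasPermissions, generate_container_sas)

    sas = generate_container_sas(
        account_name="<account>",
        container_name="mycontainer",
        account_key="<account-key>",
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1))
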
-
-class BlobSasPermissions(object):
-    """BlobSasPermissions class to be used with the
-    :func:`~azure.storage.blob.generate_blob_sas` function.
-
-    :param bool read:
-        Read the content, properties, metadata and block list. Use the blob as
-        the source of a copy operation.
-    :param bool add:
-        Add a block to an append blob.
-    :param bool create:
-        Write a new blob, snapshot a blob, or copy a blob to a new blob.
-    :param bool write:
-        Create or write content, properties, metadata, or block list. Snapshot
-        or lease the blob. Resize the blob (page blob only). Use the blob as the
-        destination of a copy operation within the same account.
-    :param bool delete:
-        Delete the blob.
-    """
-    def __init__(self, read=False, add=False, create=False, write=False,
-                 delete=False):
-        self.read = read
-        self.add = add
-        self.create = create
-        self.write = write
-        self.delete = delete
-        self._str = (('r' if self.read else '') +
-                     ('a' if self.add else '') +
-                     ('c' if self.create else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a BlobSasPermissions from a string.
-
-        To specify read, add, create, write, or delete permissions, include
-        only the first letter of each word in the string. For example, to
-        grant read and write permissions, provide the string "rw".
-
-        :param str permission: The string which dictates the read, add, create,
-            write, or delete permissions.
-        :return: A BlobSasPermissions object
-        :rtype: ~azure.storage.blob.BlobSasPermissions
-        """
-        p_read = 'r' in permission
-        p_add = 'a' in permission
-        p_create = 'c' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-
-        parsed = cls(p_read, p_add, p_create, p_write, p_delete)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-
-class CustomerProvidedEncryptionKey(object):
-    """
-    All data in Azure Storage is encrypted at-rest using an account-level encryption key.
-    In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents
-    and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
-
-    When you use a customer-provided key, Azure Storage does not manage or persist your key.
-    When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
-    A SHA-256 hash of the encryption key is written alongside the blob contents,
-    and is used to verify that all subsequent operations against the blob use the same encryption key.
-    This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
-    When reading a blob, the provided key is used to decrypt your data after reading it from disk.
-    In both cases, the provided encryption key is securely discarded
-    as soon as the encryption or decryption process completes.
-
-    :param str key_value:
-        Base64-encoded AES-256 encryption key value.
-    :param str key_hash:
-        Base64-encoded SHA256 of the encryption key.
-    :ivar str algorithm:
-        Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
-    """
-    def __init__(self, key_value, key_hash):
-        self.key_value = key_value
-        self.key_hash = key_hash
-        self.algorithm = 'AES256'
-
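
A sketch of deriving the required SHA-256 hash for a locally held key and using it on a single upload; the raw key bytes and names are illustrative assumptions:

    # Sketch only: build a customer-provided key and encrypt one upload.
    import base64
    import hashlib
    import os

    from azure.storage.blob import (
        BlobServiceClient, CustomerProvidedEncryptionKey)

    raw_key = os.urandom(32)  # 256-bit AES key; persist it yourself
    cpk = CustomerProvidedEncryptionKey(
        key_value=base64.b64encode(raw_key).decode(),
        key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode())

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "secret.bin")
    blob.upload_blob(b"secret payload", overwrite=True, cpk=cpk)
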
-
-class ContainerEncryptionScope(object):
-    """The default encryption scope configuration for a container.
-
-    This scope is used implicitly for all future writes within the container,
-    but can be overridden per blob operation.
-
-    .. versionadded:: 12.2.0
-
-    :param str default_encryption_scope:
-        Specifies the default encryption scope to set on the container and use for
-        all future writes.
-    :param bool prevent_encryption_scope_override:
-        If true, prevents any request from specifying a different encryption scope than the scope
-        set on the container. Default value is false.
-    """
-
-    def __init__(self, default_encryption_scope, **kwargs):
-        self.default_encryption_scope = default_encryption_scope
-        self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False)
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if generated.properties.default_encryption_scope:
-            scope = cls(
-                generated.properties.default_encryption_scope,
-                prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False
-            )
-            return scope
-        return None
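
A sketch of applying a default scope at container creation; the scope and container names are placeholders:

    # Sketch only: default all writes in a container to a named
    # encryption scope, with per-blob overrides blocked.
    from azure.storage.blob import (
        BlobServiceClient, ContainerEncryptionScope)

    service = BlobServiceClient.from_connection_string("<connection-string>")
    scope = ContainerEncryptionScope(
        default_encryption_scope="my-scope",
        prevent_encryption_scope_override=True)
    service.create_container("scoped-data", container_encryption_scope=scope)
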
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_serialize.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_serialize.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_serialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_serialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,103 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from azure.core import MatchConditions
-
-from ._models import ContainerEncryptionScope
-from ._generated.models import (
-    ModifiedAccessConditions,
-    SourceModifiedAccessConditions,
-    CpkScopeInfo,
-    ContainerCpkScopeInfo
-)
-
-
-_SUPPORTED_API_VERSIONS = [
-    '2019-02-02',
-    '2019-07-07'
-]
-
-
-def _get_match_headers(kwargs, match_param, etag_param):
-    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
-    if_match = None
-    if_none_match = None
-    match_condition = kwargs.pop(match_param, None)
-    if match_condition == MatchConditions.IfNotModified:
-        if_match = kwargs.pop(etag_param, None)
-        if not if_match:
-            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
-    elif match_condition == MatchConditions.IfPresent:
-        if_match = '*'
-    elif match_condition == MatchConditions.IfModified:
-        if_none_match = kwargs.pop(etag_param, None)
-        if not if_none_match:
-            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
-    elif match_condition == MatchConditions.IfMissing:
-        if_none_match = '*'
-    elif match_condition is None:
-        if etag_param in kwargs:
-            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
-    else:
-        raise TypeError("Invalid match condition: {}".format(match_condition))
-    return if_match, if_none_match
-
-
-def get_modify_conditions(kwargs):
-    # type: (Dict[str, Any]) -> ModifiedAccessConditions
-    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
-    return ModifiedAccessConditions(
-        if_modified_since=kwargs.pop('if_modified_since', None),
-        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
-        if_match=if_match or kwargs.pop('if_match', None),
-        if_none_match=if_none_match or kwargs.pop('if_none_match', None)
-    )
-
-
-def get_source_conditions(kwargs):
-    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
-    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
-    return SourceModifiedAccessConditions(
-        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
-        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
-        source_if_match=if_match or kwargs.pop('source_if_match', None),
-        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
-    )
-
-
-def get_cpk_scope_info(kwargs):
-    # type: (Dict[str, Any]) -> CpkScopeInfo
-    if 'encryption_scope' in kwargs:
-        return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope'))
-    return None
-
-
-def get_container_cpk_scope_info(kwargs):
-    # type: (Dict[str, Any]) -> ContainerCpkScopeInfo
-    encryption_scope = kwargs.pop('container_encryption_scope', None)
-    if encryption_scope:
-        if isinstance(encryption_scope, ContainerEncryptionScope):
-            return ContainerCpkScopeInfo(
-                default_encryption_scope=encryption_scope.default_encryption_scope,
-                prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override
-            )
-        if isinstance(encryption_scope, dict):
-            return ContainerCpkScopeInfo(
-                default_encryption_scope=encryption_scope['default_encryption_scope'],
-                prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override')
-            )
-        raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.")
-    return None
-
-
-def get_api_version(kwargs, default):
-    # type: (Dict[str, Any]) -> str
-    api_version = kwargs.pop('api_version', None)
-    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
-        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
-        raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions))
-    return api_version or default
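
A sketch of the etag/match_condition keyword pair that _get_match_headers above consumes, driven from a public operation; client setup and names are illustrative:

    # Sketch only: conditional update that maps to an If-Match header.
    from azure.core import MatchConditions
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "myblob.txt")
    props = blob.get_blob_properties()
    blob.set_blob_metadata(
        {"reviewed": "true"},
        etag=props.etag,
        match_condition=MatchConditions.IfNotModified)
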
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,56 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-
-try:
-    from urllib.parse import quote, unquote
-except ImportError:
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-
-def url_quote(url):
-    return quote(url)
-
-
-def url_unquote(url):
-    return unquote(url)
-
-
-def encode_base64(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def decode_base64_to_bytes(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def decode_base64_to_text(data):
-    decoded_bytes = decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, six.text_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, six.text_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = encode_base64(digest)
-    return encoded_digest
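
A self-contained sketch of the same HMAC-SHA256 primitive that sign_string above implements; the key and string-to-sign are placeholders:

    # Sketch only: base64(HMAC-SHA256(key, string_to_sign)).
    import base64
    import hashlib
    import hmac

    key = b"not-a-real-account-key"  # decoded account key bytes
    string_to_sign = b"GET\n...canonicalized headers and resource..."
    digest = hmac.new(key, string_to_sign, hashlib.sha256).digest()
    print(base64.b64encode(digest).decode())
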
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/authentication.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/authentication.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/authentication.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/authentication.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-import sys
-
-try:
-    from urllib.parse import urlparse, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import unquote # type: ignore
-
-try:
-    from yarl import URL
-except ImportError:
-    pass
-
-try:
-    from azure.core.pipeline.transport import AioHttpTransport
-except ImportError:
-    AioHttpTransport = None
-
-from azure.core.exceptions import ClientAuthenticationError
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-
-from . import sign_string
-
-
-logger = logging.getLogger(__name__)
-
-
-
-# wraps a given exception with the desired exception type
-def _wrap_exception(ex, desired_type):
-    msg = ""
-    if ex.args:
-        msg = ex.args[0]
-    if sys.version_info >= (3,):
-        # Automatic chaining in Python 3 means we keep the trace
-        return desired_type(msg)
-    # There isn't a good solution in Python 2 for keeping the stack trace
-    # in general, or one that will not result in an error in Python 3
-    # However, we can keep the previous error type and message
-    # TODO: In the future we will log the trace
-    return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
-
-
-class AzureSigningError(ClientAuthenticationError):
-    """
-    Represents a fatal error when attempting to sign a request.
-    In general, the cause of this exception is user error. For example, the given account key is not valid.
-    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
-    """
-
-
-# pylint: disable=no-self-use
-class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
-
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-        super(SharedKeyCredentialPolicy, self).__init__()
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.http_request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = urlparse(request.http_request.url).path
-        try:
-            if isinstance(request.context.transport, AioHttpTransport) or \
-                isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
-                uri_path = URL(uri_path)
-                return '/' + self.account_name + str(uri_path)
-        except TypeError:
-            pass
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.http_request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.http_request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
-
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        try:
-            signature = sign_string(self.account_key, string_to_sign)
-            auth_string = 'SharedKey ' + self.account_name + ':' + signature
-            request.http_request.headers['Authorization'] = auth_string
-        except Exception as ex:
-            # Wrap any error that occurred as a signing error.
-            # Doing so clarifies and locates the source of the problem.
-            raise _wrap_exception(ex, AzureSigningError)
-
-    def on_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        #logger.debug("String_to_sign=%s", string_to_sign)
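
`on_request` above builds the Shared Key string-to-sign from the HTTP verb, a fixed list of standard headers, the canonicalized `x-ms-*` headers, the canonicalized resource, and the query string. A rough sketch of just the canonicalized-headers step, under the same lowercase/sort/join rules:

def canonicalized_headers(headers):
    # Lowercase the names, keep only x-ms-* headers with values,
    # sort them, and join as "name:value\n" lines.
    x_ms = sorted((name.lower(), value) for name, value in headers.items()
                  if name.lower().startswith('x-ms-') and value is not None)
    return ''.join('{}:{}\n'.format(name, value) for name, value in x_ms)

print(canonicalized_headers({
    'x-ms-version': '2019-07-07',
    'Content-Type': 'application/octet-stream',
    'x-ms-date': 'Wed, 01 Jan 2020 00:00:00 GMT',
}))
# x-ms-date:Wed, 01 Jan 2020 00:00:00 GMT
# x-ms-version:2019-07-07
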
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,430 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union,
-    Optional,
-    Any,
-    Iterable,
-    Dict,
-    List,
-    Type,
-    Tuple,
-    TYPE_CHECKING,
-)
-import logging
-
-try:
-    from urllib.parse import parse_qs, quote
-except ImportError:
-    from urlparse import parse_qs  # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-
-from azure.core.configuration import Configuration
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline import Pipeline
-from azure.core.pipeline.transport import RequestsTransport, HttpTransport
-from azure.core.pipeline.policies import (
-    RedirectPolicy,
-    ContentDecodePolicy,
-    BearerTokenCredentialPolicy,
-    ProxyPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-    UserAgentPolicy
-)
-
-from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .models import LocationMode
-from .authentication import SharedKeyCredentialPolicy
-from .shared_access_signature import QueryStringConstants
-from .policies import (
-    StorageHeadersPolicy,
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageResponseHook,
-    StorageLoggingPolicy,
-    StorageHosts,
-    QueueMessagePolicy,
-    ExponentialRetry,
-)
-from .._version import VERSION
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-
-_LOGGER = logging.getLogger(__name__)
-_SERVICE_PARAMS = {
-    "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-    "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"},
-    "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"},
-    "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"},
-}
-
-
-class StorageAccountHostsMixin(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        parsed_url,  # type: Any
-        service,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
-        self._hosts = kwargs.get("_hosts")
-        self.scheme = parsed_url.scheme
-
-        if service not in ["blob", "queue", "file-share", "dfs"]:
-            raise ValueError("Invalid service: {}".format(service))
-        service_name = service.split('-')[0]
-        account = parsed_url.netloc.split(".{}.core.".format(service_name))
-        self.account_name = account[0] if len(account) > 1 else None
-        secondary_hostname = None
-
-        self.credential = format_shared_key_credential(account, credential)
-        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
-            raise ValueError("Token credential is only supported with HTTPS.")
-        if hasattr(self.credential, "account_name"):
-            self.account_name = self.credential.account_name
-            secondary_hostname = "{}-secondary.{}.{}".format(
-                self.credential.account_name, service_name, SERVICE_HOST_BASE)
-
-        if not self._hosts:
-            if len(account) > 1:
-                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
-            if kwargs.get("secondary_hostname"):
-                secondary_hostname = kwargs["secondary_hostname"]
-            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
-            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
-
-        self.require_encryption = kwargs.get("require_encryption", False)
-        self.key_encryption_key = kwargs.get("key_encryption_key")
-        self.key_resolver_function = kwargs.get("key_resolver_function")
-        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
-
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-
-    def __exit__(self, *args):
-        self._client.__exit__(*args)
-
-    def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        self._client.close()
-
-    @property
-    def url(self):
-        """The full endpoint URL to this entity, including SAS token if used.
-
-        This could be either the primary endpoint,
-        or the secondary endpoint depending on the current :func:`location_mode`.
-        """
-        return self._format_url(self._hosts[self._location_mode])
-
-    @property
-    def primary_endpoint(self):
-        """The full primary endpoint URL.
-
-        :type: str
-        """
-        return self._format_url(self._hosts[LocationMode.PRIMARY])
-
-    @property
-    def primary_hostname(self):
-        """The hostname of the primary endpoint.
-
-        :type: str
-        """
-        return self._hosts[LocationMode.PRIMARY]
-
-    @property
-    def secondary_endpoint(self):
-        """The full secondary endpoint URL if configured.
-
-        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str
-        :raise ValueError:
-        """
-        if not self._hosts[LocationMode.SECONDARY]:
-            raise ValueError("No secondary host configured.")
-        return self._format_url(self._hosts[LocationMode.SECONDARY])
-
-    @property
-    def secondary_hostname(self):
-        """The hostname of the secondary endpoint.
-
-        If not available, this will be None. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str or None
-        """
-        return self._hosts[LocationMode.SECONDARY]
-
-    @property
-    def location_mode(self):
-        """The location mode that the client is currently using.
-
-        By default this will be "primary". Options include "primary" and "secondary".
-
-        :type: str
-        """
-
-        return self._location_mode
-
-    @location_mode.setter
-    def location_mode(self, value):
-        if self._hosts.get(value):
-            self._location_mode = value
-            self._client._config.url = self.url  # pylint: disable=protected-access
-        else:
-            raise ValueError("No host URL for location mode: {}".format(value))
-
-    @property
-    def api_version(self):
-        """The version of the Storage API used for requests.
-
-        :type: str
-        """
-        return self._client._config.version  # pylint: disable=protected-access
-
-    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
-        query_str = "?"
-        if snapshot:
-            query_str += "snapshot={}&".format(self.snapshot)
-        if share_snapshot:
-            query_str += "sharesnapshot={}&".format(self.snapshot)
-        if sas_token and not credential:
-            query_str += sas_token
-        elif is_credential_sastoken(credential):
-            query_str += credential.lstrip("?")
-            credential = None
-        return query_str.rstrip("?&"), credential
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, "get_token"):
-            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-
-        config = kwargs.get("_configuration") or create_configuration(**kwargs)
-        if kwargs.get("_pipeline"):
-            return config, kwargs["_pipeline"]
-        config.transport = kwargs.get("transport")  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            config.transport = RequestsTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            RedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs),
-            config.retry_policy,
-            config.logging_policy,
-            StorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs)
-        ]
-        return config, Pipeline(config.transport, policies=policies)
-
-    def _batch_send(
-        self, *reqs,  # type: HttpRequest
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ],
-            enforce_https=False
-        )
-
-        pipeline_response = self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts()
-            if raise_on_any_failure:
-                parts = list(response.parts())
-                if any(p for p in parts if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts
-                    )
-                    raise error
-                return iter(parts)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-class TransportWrapper(HttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, transport):
-        self._transport = transport
-
-    def send(self, request, **kwargs):
-        return self._transport.send(request, **kwargs)
-
-    def open(self):
-        pass
-
-    def close(self):
-        pass
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, *args):  # pylint: disable=arguments-differ
-        pass
-
-
-def format_shared_key_credential(account, credential):
-    if isinstance(credential, six.string_types):
-        if len(account) < 2:
-            raise ValueError("Unable to determine account name for shared key credential.")
-        credential = {"account_name": account[0], "account_key": credential}
-    if isinstance(credential, dict):
-        if "account_name" not in credential:
-            raise ValueError("Shared key credential missing 'account_name")
-        if "account_key" not in credential:
-            raise ValueError("Shared key credential missing 'account_key")
-        return SharedKeyCredentialPolicy(**credential)
-    return credential
-
-
-def parse_connection_str(conn_str, credential, service):
-    conn_str = conn_str.rstrip(";")
-    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
-    if any(len(tup) != 2 for tup in conn_settings):
-        raise ValueError("Connection string is either blank or malformed.")
-    conn_settings = dict(conn_settings)
-    endpoints = _SERVICE_PARAMS[service]
-    primary = None
-    secondary = None
-    if not credential:
-        try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
-        except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
-    if endpoints["primary"] in conn_settings:
-        primary = conn_settings[endpoints["primary"]]
-        if endpoints["secondary"] in conn_settings:
-            secondary = conn_settings[endpoints["secondary"]]
-    else:
-        if endpoints["secondary"] in conn_settings:
-            raise ValueError("Connection string specifies only secondary endpoint.")
-        try:
-            primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
-                service,
-                conn_settings["EndpointSuffix"],
-            )
-            secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
-            )
-        except KeyError:
-            pass
-
-    if not primary:
-        try:
-            primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
-            )
-        except KeyError:
-            raise ValueError("Connection string missing required connection details.")
-    return primary, secondary, credential
-
-
-def create_configuration(**kwargs):
-    # type: (**Any) -> Configuration
-    config = Configuration(**kwargs)
-    config.headers_policy = StorageHeadersPolicy(**kwargs)
-    config.user_agent_policy = UserAgentPolicy(
-        sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs)
-    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-    config.logging_policy = StorageLoggingPolicy(**kwargs)
-    config.proxy_policy = ProxyPolicy(**kwargs)
-
-    # Storage settings
-    config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
-    config.copy_polling_interval = 15
-
-    # Block blob uploads
-    config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
-    config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
-    config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
-
-    # Page blob uploads
-    config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
-
-    # Blob downloads
-    config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
-    config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
-
-    # File uploads
-    config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
-    return config
-
-
-def parse_query(query_str):
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
-    sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
-    sas_token = None
-    if sas_params:
-        sas_token = "&".join(sas_params)
-
-    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
-    return snapshot, sas_token
-
-
-def is_credential_sastoken(credential):
-    if not credential or not isinstance(credential, six.string_types):
-        return False
-
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = parse_qs(credential.lstrip("?"))
-    if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
-        return True
-    return False
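
`parse_connection_str` above splits `Key=Value;` pairs, extracts `AccountName`/`AccountKey` (or a SAS token) as the credential, and derives the primary and secondary endpoints. A minimal sketch of the splitting and endpoint-derivation steps, with a hypothetical account name and key:

SERVICE_HOST_BASE = 'core.windows.net'

def parse_conn_str(conn_str, service='blob'):
    # Split on the first '=' only: account keys are base64
    # and may themselves end in '='.
    settings = dict(s.split('=', 1) for s in conn_str.rstrip(';').split(';'))
    credential = {'account_name': settings['AccountName'],
                  'account_key': settings['AccountKey']}
    suffix = settings.get('EndpointSuffix', SERVICE_HOST_BASE)
    primary = 'https://{}.{}.{}'.format(settings['AccountName'], service, suffix)
    secondary = 'https://{}-secondary.{}.{}'.format(settings['AccountName'], service, suffix)
    return primary, secondary, credential

primary, secondary, cred = parse_conn_str(
    'DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=abc==;')
print(primary)    # https://myaccount.blob.core.windows.net
print(secondary)  # https://myaccount-secondary.blob.core.windows.net
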
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/base_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,177 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-from azure.core.pipeline import AsyncPipeline
-from azure.core.async_paging import AsyncList
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline.policies import (
-    ContentDecodePolicy,
-    AsyncBearerTokenCredentialPolicy,
-    AsyncRedirectPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-from azure.core.pipeline.transport import AsyncHttpTransport
-
-from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .authentication import SharedKeyCredentialPolicy
-from .base_client import create_configuration
-from .policies import (
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageHosts,
-    StorageHeadersPolicy,
-    QueueMessagePolicy
-)
-from .policies_async import AsyncStorageResponseHook
-
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import Pipeline
-    from azure.core.pipeline.transport import HttpRequest
-    from azure.core.configuration import Configuration
-_LOGGER = logging.getLogger(__name__)
-
-
-class AsyncStorageAccountHostsMixin(object):
-
-    def __enter__(self):
-        raise TypeError("Async client only supports 'async with'.")
-
-    def __exit__(self, *args):
-        pass
-
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-
-    async def __aexit__(self, *args):
-        await self._client.__aexit__(*args)
-
-    async def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        await self._client.close()
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, 'get_token'):
-            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-        config = kwargs.get('_configuration') or create_configuration(**kwargs)
-        if kwargs.get('_pipeline'):
-            return config, kwargs['_pipeline']
-        config.transport = kwargs.get('transport')  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            try:
-                from azure.core.pipeline.transport import AioHttpTransport
-            except ImportError:
-                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
-            config.transport = AioHttpTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            AsyncRedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
-            config.retry_policy,
-            config.logging_policy,
-            AsyncStorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs),
-        ]
-        return config, AsyncPipeline(config.transport, policies=policies)
-
-    async def _batch_send(
-        self, *reqs: 'HttpRequest',
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ],
-            enforce_https=False
-        )
-
-        pipeline_response = await self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts() # Return an AsyncIterator
-            if raise_on_any_failure:
-                parts_list = []
-                async for part in parts:
-                    parts_list.append(part)
-                if any(p for p in parts_list if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts_list
-                    )
-                    raise error
-                return AsyncList(parts_list)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-
-class AsyncTransportWrapper(AsyncHttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, async_transport):
-        self._transport = async_transport
-
-    async def send(self, request, **kwargs):
-        return await self._transport.send(request, **kwargs)
-
-    async def open(self):
-        pass
-
-    async def close(self):
-        pass
-
-    async def __aenter__(self):
-        pass
-
-    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
-        pass
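
`AsyncTransportWrapper` exists so that a child client handed its parent's transport cannot tear it down: `open`, `close`, `__aenter__`, and `__aexit__` are all no-ops and only `send` is delegated. A sketch of the idea against a stand-in transport (no Azure types required):

import asyncio

class FakeTransport:
    closed = False
    async def send(self, request):
        return 'response to {}'.format(request)
    async def close(self):
        self.closed = True

class AsyncTransportWrapper:
    def __init__(self, transport):
        self._transport = transport
    async def send(self, request, **kwargs):
        return await self._transport.send(request, **kwargs)
    async def close(self):
        pass  # deliberately do NOT close the shared inner transport

async def main():
    shared = FakeTransport()
    child = AsyncTransportWrapper(shared)
    print(await child.send('GET /'))
    await child.close()
    print(shared.closed)  # False: the parent's transport survives

asyncio.run(main())
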
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/constants.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/constants.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-from .._generated.version import VERSION
-
-
-X_MS_VERSION = VERSION
-
-# Socket timeout in seconds
-CONNECTION_TIMEOUT = 20
-READ_TIMEOUT = 20
-
-# For Python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned).
-# The socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
-    # the 2000 seconds was calculated with: 100MB (max block size) / 50KB/s (an arbitrarily chosen minimum upload speed)
-    READ_TIMEOUT = 2000
-
-STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
-
-SERVICE_HOST_BASE = 'core.windows.net'
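
The 2000-second `READ_TIMEOUT` comes directly from the arithmetic in the comment above: 100 MB (the maximum block size) divided by an assumed floor of 50 KB/s. Checking it:

max_block_bytes = 100 * 1024 * 1024   # 100 MB max block size
floor_speed = 50 * 1024               # 50 KB/s assumed minimum upload speed
print(max_block_bytes / floor_speed)  # 2048.0 seconds, which the constant rounds down to 2000
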
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/encryption.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/encryption.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,542 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from os import urandom
-from json import (
-    dumps,
-    loads,
-)
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from azure.core.exceptions import HttpResponseError
-
-from .._version import VERSION
-from . import encode_base64, decode_base64_to_bytes
-
-
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError('{0} should not be None.'.format(param_name))
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier,
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError("Unsupported encryption version.")
-    except KeyError:
-        raise ValueError("Unsupported encryption version.")
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function used that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
-        raise ValueError('Encryption version is not supported.')
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
-        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
-
-
-def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
-    Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
-
-
-def encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the upload_data_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
-
-def generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-
-    :param object key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                 content, start_offset, end_offset, response_headers):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
-    except:  # pylint: disable=bare-except
-        if require_encryption:
-            raise ValueError(
-                'Encryption required, but received data does not contain appropriate metadata. ' + \
-                'Data was either not encrypted or metadata has been lost.')
-
-        return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    blob_type = response_headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    if 'content-range' in response_headers:
-        content_range = response_headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
-
-
-def encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted and so was not encrypted
-        # or the user provided a json formatted message.
-        if require_encryption:
-            raise ValueError('Message was not encrypted.')
-
-        return message
-    try:
-        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception as error:
-        raise HttpResponseError(
-            message="Decryption failed.",
-            response=response,
-            error=error)
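
The deleted module above implements client-side envelope encryption: a random 32-byte content-encryption key (CEK) encrypts the payload with AES-256-CBC and PKCS7 padding, and the CEK itself is wrapped by the user's key-encryption key (KEK). A minimal round trip of just the AES/PKCS7 layer, assuming the `cryptography` package is installed:

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7

cek = os.urandom(32)  # content encryption key (AES-256)
iv = os.urandom(16)   # CBC initialization vector

# Pad to the 16-byte AES block size, then encrypt.
padder = PKCS7(128).padder()
padded = padder.update(b'hello blob') + padder.finalize()
encryptor = Cipher(AES(cek), CBC(iv), default_backend()).encryptor()
ciphertext = encryptor.update(padded) + encryptor.finalize()

# Decrypt and unpad to recover the plaintext.
decryptor = Cipher(AES(cek), CBC(iv), default_backend()).decryptor()
unpadder = PKCS7(128).unpadder()
plain = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize()) + unpadder.finalize()
assert plain == b'hello blob'
print(plain)
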
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/models.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/models.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,448 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-def get_enum_value(value):
-    if value is None or value in ["None", ""]:
-        return None
-    try:
-        return value.value
-    except AttributeError:
-        return value
-
-
-class StorageErrorCode(str, Enum):
-
-    # Generic storage values
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    no_authentication_information = "NoAuthenticationInformation"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-
-    # Blob values
-    append_position_condition_not_met = "AppendPositionConditionNotMet"
-    blob_already_exists = "BlobAlreadyExists"
-    blob_not_found = "BlobNotFound"
-    blob_overwritten = "BlobOverwritten"
-    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
-    block_count_exceeds_limit = "BlockCountExceedsLimit"
-    block_list_too_long = "BlockListTooLong"
-    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
-    cannot_verify_copy_source = "CannotVerifyCopySource"
-    container_already_exists = "ContainerAlreadyExists"
-    container_being_deleted = "ContainerBeingDeleted"
-    container_disabled = "ContainerDisabled"
-    container_not_found = "ContainerNotFound"
-    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
-    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
-    copy_id_mismatch = "CopyIdMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
-    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
-    incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
-    infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
-    invalid_blob_or_block = "InvalidBlobOrBlock"
-    invalid_blob_tier = "InvalidBlobTier"
-    invalid_blob_type = "InvalidBlobType"
-    invalid_block_id = "InvalidBlockId"
-    invalid_block_list = "InvalidBlockList"
-    invalid_operation = "InvalidOperation"
-    invalid_page_range = "InvalidPageRange"
-    invalid_source_blob_type = "InvalidSourceBlobType"
-    invalid_source_blob_url = "InvalidSourceBlobUrl"
-    invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
-    lease_already_present = "LeaseAlreadyPresent"
-    lease_already_broken = "LeaseAlreadyBroken"
-    lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
-    lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
-    lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
-    lease_id_missing = "LeaseIdMissing"
-    lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
-    lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
-    lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
-    lease_lost = "LeaseLost"
-    lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
-    lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
-    lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
-    max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
-    no_pending_copy_operation = "NoPendingCopyOperation"
-    operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
-    pending_copy_operation = "PendingCopyOperation"
-    previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
-    previous_snapshot_not_found = "PreviousSnapshotNotFound"
-    previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
-    sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
-    sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
-    snapshot_count_exceeded = "SnapshotCountExceeded"
-    snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
-    snapshots_present = "SnapshotsPresent"
-    source_condition_not_met = "SourceConditionNotMet"
-    system_in_use = "SystemInUse"
-    target_condition_not_met = "TargetConditionNotMet"
-    unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
-    blob_being_rehydrated = "BlobBeingRehydrated"
-    blob_archived = "BlobArchived"
-    blob_not_archived = "BlobNotArchived"
-
-    # Queue values
-    invalid_marker = "InvalidMarker"
-    message_not_found = "MessageNotFound"
-    message_too_large = "MessageTooLarge"
-    pop_receipt_mismatch = "PopReceiptMismatch"
-    queue_already_exists = "QueueAlreadyExists"
-    queue_being_deleted = "QueueBeingDeleted"
-    queue_disabled = "QueueDisabled"
-    queue_not_empty = "QueueNotEmpty"
-    queue_not_found = "QueueNotFound"
-
-    # File values
-    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
-    client_cache_flush_delay = "ClientCacheFlushDelay"
-    delete_pending = "DeletePending"
-    directory_not_empty = "DirectoryNotEmpty"
-    file_lock_conflict = "FileLockConflict"
-    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
-    parent_not_found = "ParentNotFound"
-    read_only_attribute = "ReadOnlyAttribute"
-    share_already_exists = "ShareAlreadyExists"
-    share_being_deleted = "ShareBeingDeleted"
-    share_disabled = "ShareDisabled"
-    share_not_found = "ShareNotFound"
-    sharing_violation = "SharingViolation"
-    share_snapshot_in_progress = "ShareSnapshotInProgress"
-    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
-    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
-    share_has_snapshots = "ShareHasSnapshots"
-    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
-
-    # DataLake values
-    content_length_must_be_zero = 'ContentLengthMustBeZero'
-    path_already_exists = 'PathAlreadyExists'
-    invalid_flush_position = 'InvalidFlushPosition'
-    invalid_property_name = 'InvalidPropertyName'
-    invalid_source_uri = 'InvalidSourceUri'
-    unsupported_rest_version = 'UnsupportedRestVersion'
-    file_system_not_found = 'FilesystemNotFound'
-    path_not_found = 'PathNotFound'
-    rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound'
-    source_path_not_found = 'SourcePathNotFound'
-    destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted'
-    file_system_already_exists = 'FilesystemAlreadyExists'
-    file_system_being_deleted = 'FilesystemBeingDeleted'
-    invalid_destination_path = 'InvalidDestinationPath'
-    invalid_rename_source_path = 'InvalidRenameSourcePath'
-    invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType'
-    lease_is_already_broken = 'LeaseIsAlreadyBroken'
-    lease_name_mismatch = 'LeaseNameMismatch'
-    path_conflict = 'PathConflict'
-    source_path_is_being_deleted = 'SourcePathIsBeingDeleted'
-
-
-class DictMixin(object):
-
-    def __setitem__(self, key, item):
-        self.__dict__[key] = item
-
-    def __getitem__(self, key):
-        return self.__dict__[key]
-
-    def __repr__(self):
-        return str(self)
-
-    def __len__(self):
-        return len(self.keys())
-
-    def __delitem__(self, key):
-        self.__dict__[key] = None
-
-    def __eq__(self, other):
-        """Compare objects by comparing all attributes."""
-        if isinstance(other, self.__class__):
-            return self.__dict__ == other.__dict__
-        return False
-
-    def __ne__(self, other):
-        """Compare objects by comparing all attributes."""
-        return not self.__eq__(other)
-
-    def __str__(self):
-        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
-
-    def has_key(self, k):
-        return k in self.__dict__
-
-    def update(self, *args, **kwargs):
-        return self.__dict__.update(*args, **kwargs)
-
-    def keys(self):
-        return [k for k in self.__dict__ if not k.startswith('_')]
-
-    def values(self):
-        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def items(self):
-        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def get(self, key, default=None):
-        if key in self.__dict__:
-            return self.__dict__[key]
-        return default
-
-
-class LocationMode(object):
-    """
-    Specifies the location the request should be sent to. This mode only applies
-    for RA-GRS accounts which allow secondary read access. All other account types
-    must use PRIMARY.
-    """
-
-    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
-    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
-
-
-class ResourceTypes(object):
-    """
-    Specifies the resource types that are accessible with the account SAS.
-
-    :param bool service:
-        Access to service-level APIs (e.g., Get/Set Service Properties,
-        Get Service Stats, List Containers/Queues/Shares)
-    :param bool container:
-        Access to container-level APIs (e.g., Create/Delete Container,
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories)
-    :param bool object:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    """
-
-    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
-        self.service = service
-        self.container = container
-        self.object = object
-        self._str = (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create a ResourceTypes from a string.
-
-        To specify service, container, or object you need only
-        include the first letter of the word in the string. E.g. for service and container,
-        you would provide a string "sc".
-
-        :param str string: Specify service, container, or object in
-            the string with the first letter of the word.
-        :return: A ResourceTypes object
-        :rtype: ~azure.storage.blob.ResourceTypes
-        """
-        res_service = 's' in string
-        res_container = 'c' in string
-        res_object = 'o' in string
-
-        parsed = cls(res_service, res_container, res_object)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class AccountSasPermissions(object):
-    """
-    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
-    function and for the AccessPolicies used with set_*_acl. There are two types of
-    SAS which may be used to grant resource access. One is to grant access to a
-    specific resource (resource-specific). Another is to grant access to the
-    entire service for a specific account and allow certain operations based on
-    the permissions found here.
-
-    :param bool read:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits read permissions to the specified resource type.
-    :param bool write:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits write permissions to the specified resource type.
-    :param bool delete:
-        Valid for Container and Object resource types, except for queue messages.
-    :param bool list:
-        Valid for Service and Container resource types only.
-    :param bool add:
-        Valid for the following Object resource types only: queue messages, and append blobs.
-    :param bool create:
-        Valid for the following Object resource types only: blobs and files.
-        Users can create new blobs or files, but may not overwrite existing
-        blobs or files.
-    :param bool update:
-        Valid for the following Object resource types only: queue messages.
-    :param bool process:
-        Valid for the following Object resource type only: queue messages.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
-                 add=False, create=False, update=False, process=False):
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self.add = add
-        self.create = create
-        self.update = update
-        self.process = process
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else '') +
-                     ('a' if self.add else '') +
-                     ('c' if self.create else '') +
-                     ('u' if self.update else '') +
-                     ('p' if self.process else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create AccountSasPermissions from a string.
-
-        To specify read, write, delete, etc. permissions you need only
-        include the first letter of the word in the string. E.g. for read and write
-        permissions you would provide a string "rw".
-
-        :param str permission: Specify permissions in
-            the string with the first letter of the word.
-        :return: An AccountSasPermissions object
-        :rtype: ~azure.storage.blob.AccountSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-        p_add = 'a' in permission
-        p_create = 'c' in permission
-        p_update = 'u' in permission
-        p_process = 'p' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process)
-        parsed._str = permission  # pylint: disable = protected-access
-        return parsed
-
-class Services(object):
-    """Specifies the services accessible with the account SAS.
-
-    :param bool blob:
-        Access for the `~azure.storage.blob.BlobServiceClient`
-    :param bool queue:
-        Access for the `~azure.storage.queue.QueueServiceClient`
-    :param bool fileshare:
-        Access for the `~azure.storage.fileshare.ShareServiceClient`
-    """
-
-    def __init__(self, blob=False, queue=False, fileshare=False):
-        self.blob = blob
-        self.queue = queue
-        self.fileshare = fileshare
-        self._str = (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('f' if self.fileshare else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create Services from a string.
-
-        To specify blob, queue, or file you need only
-        include the first letter of the word in the string. E.g. for blob and queue,
-        you would provide a string "bq".
-
-        :param str string: Specify blob, queue, or file in
-            the string with the first letter of the word.
-        :return: A Services object
-        :rtype: ~azure.storage.blob.Services
-        """
-        res_blob = 'b' in string
-        res_queue = 'q' in string
-        res_file = 'f' in string
-
-        parsed = cls(res_blob, res_queue, res_file)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
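
For reference, the one-letter permission strings produced and parsed by the deleted
``ResourceTypes``, ``AccountSasPermissions``, and ``Services`` classes round-trip as
follows (a usage sketch against the 1.4.0 package, where this module still ships)::

    # Runs against azure-multiapi-storage 1.4.0; the module is removed in 1.5.0.
    from azure.multiapi.storagev2.blob.v2019_07_07._shared.models import (
        AccountSasPermissions,
        ResourceTypes,
        Services,
    )

    # Flags compose into a string in a fixed order: 'rwdlacup' for
    # permissions, 'sco' for resource types, 'bqf' for services.
    perms = AccountSasPermissions(read=True, write=True, list=True)
    assert str(perms) == "rwl"

    # from_string() is the inverse: each letter present sets its flag.
    types = ResourceTypes.from_string("sc")
    assert types.service and types.container and not types.object

    services = Services.from_string("bq")
    assert services.blob and services.queue and not services.fileshare
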
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/parser.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/parser.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/parser.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/parser.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
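
The deleted ``_to_utc_datetime`` helper only formats; it performs no timezone
conversion, so callers must already hold a UTC datetime. A minimal self-contained
sketch of the same behaviour::

    from datetime import datetime

    def _to_utc_datetime(value):
        # Identical formatting to the removed helper: second precision,
        # literal 'Z' suffix, no timezone arithmetic.
        return value.strftime('%Y-%m-%dT%H:%M:%SZ')

    assert _to_utc_datetime(datetime(2019, 7, 7, 12, 30, 5)) == '2019-07-07T12:30:05Z'
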
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,610 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-import types
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode # type: ignore
-    from urlparse import ( # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):
-    """Is this method/status code retryable? (Based on whitelists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon in the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
-        if status in [501, 505]:
-            return False
-        return True
-    return False
-
-
-def urljoin(base_url, stub_url):
-    parsed = urlparse(base_url)
-    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
-    return parsed.geturl()
-
-
-class QueueMessagePolicy(SansIOHTTPPolicy):
-
-    def on_request(self, request):
-        message_id = request.context.options.pop('queue_message_id', None)
-        if message_id:
-            request.http_request.url = urljoin(
-                request.http_request.url,
-                message_id)
-
-
-class StorageHeadersPolicy(HeadersPolicy):
-    request_id_header_name = 'x-ms-client-request-id'
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        super(StorageHeadersPolicy, self).on_request(request)
-        current_time = format_date_time(time())
-        request.http_request.headers['x-ms-date'] = current_time
-
-        custom_id = request.context.options.pop('client_request_id', None)
-        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
-
-    # def on_response(self, request, response):
-    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
-    #     if self.request_id_header_name in response.http_response.headers:
-
-    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
-
-    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
-    #             raise AzureError(
-    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
-    #                 "Service request ID: {}".format(
-    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
-    #                     response.http_response.headers['x-ms-request-id']),
-    #                 response=response.http_response
-    #             )
-
-
-class StorageHosts(SansIOHTTPPolicy):
-
-    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
-        self.hosts = hosts
-        super(StorageHosts, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        request.context.options['hosts'] = self.hosts
-        parsed_url = urlparse(request.http_request.url)
-
-        # Detect what location mode we're currently requesting with
-        location_mode = LocationMode.PRIMARY
-        for key, value in self.hosts.items():
-            if parsed_url.netloc == value:
-                location_mode = key
-
-        # See if a specific location mode has been specified, and if so, redirect
-        use_location = request.context.options.pop('use_location', None)
-        if use_location:
-            # Lock retries to the specific location
-            request.context.options['retry_to_secondary'] = False
-            if use_location not in self.hosts:
-                raise ValueError("Attempting to use undefined host location {}".format(use_location))
-            if use_location != location_mode:
-                # Update request URL to use the specified location
-                updated = parsed_url._replace(netloc=self.hosts[use_location])
-                request.http_request.url = updated.geturl()
-                location_mode = use_location
-
-        request.context.options['location_mode'] = location_mode
-
-
-class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
-    """A policy that logs HTTP request and response to the DEBUG logger.
-
-    This accepts both global configuration, and per-request level with "enable_http_logger"
-    """
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        http_request = request.http_request
-        options = request.context.options
-        if options.pop("logging_enable", self.enable_http_logger):
-            request.context["logging_enable"] = True
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                log_url = http_request.url
-                query_params = http_request.query
-                if 'sig' in query_params:
-                    log_url = log_url.replace(query_params['sig'], "sig=*****")
-                _LOGGER.debug("Request URL: %r", log_url)
-                _LOGGER.debug("Request method: %r", http_request.method)
-                _LOGGER.debug("Request headers:")
-                for header, value in http_request.headers.items():
-                    if header.lower() == 'authorization':
-                        value = '*****'
-                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
-                        # take the url apart and scrub away the signed signature
-                        scheme, netloc, path, params, query, fragment = urlparse(value)
-                        parsed_qs = dict(parse_qsl(query))
-                        parsed_qs['sig'] = '*****'
-
-                        # the SAS needs to be put back together
-                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
-
-                    _LOGGER.debug("    %r: %r", header, value)
-                _LOGGER.debug("Request body:")
-
-                # We don't want to log the binary data of a file upload.
-                if isinstance(http_request.body, types.GeneratorType):
-                    _LOGGER.debug("File upload")
-                else:
-                    _LOGGER.debug(str(http_request.body))
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log request: %r", err)
-
-    def on_response(self, request, response):
-        # type: (PipelineRequest, PipelineResponse, Any) -> None
-        if response.context.pop("logging_enable", self.enable_http_logger):
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                _LOGGER.debug("Response status: %r", response.http_response.status_code)
-                _LOGGER.debug("Response headers:")
-                for res_header, value in response.http_response.headers.items():
-                    _LOGGER.debug("    %r: %r", res_header, value)
-
-                # We don't want to log binary data if the response is a file.
-                _LOGGER.debug("Response content:")
-                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
-                header = response.http_response.headers.get('content-disposition')
-
-                if header and pattern.match(header):
-                    filename = header.partition('=')[2]
-                    _LOGGER.debug("File attachments: %s", filename)
-                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
-                    _LOGGER.debug("Body contains binary data.")
-                elif response.http_response.headers.get("content-type", "").startswith("image"):
-                    _LOGGER.debug("Body contains image data.")
-                else:
-                    if response.context.options.get('stream', False):
-                        _LOGGER.debug("Body is streamable")
-                    else:
-                        _LOGGER.debug(response.http_response.text())
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log response: %s", repr(err))
-
-
-class StorageRequestHook(SansIOHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._request_callback = kwargs.get('raw_request_hook')
-        super(StorageRequestHook, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest) -> None
-        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
-        if request_callback:
-            request_callback(request)
-
-
-class StorageResponseHook(HTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(StorageResponseHook, self).__init__()
-
-    def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = self.next.send(request)
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-
-class StorageContentValidation(SansIOHTTPPolicy):
-    """A simple policy that sends the given headers
-    with the request.
-
-    This will overwrite any headers already defined in the request.
-    """
-    header_name = 'Content-MD5'
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        super(StorageContentValidation, self).__init__()
-
-    @staticmethod
-    def get_content_md5(data):
-        md5 = hashlib.md5()
-        if isinstance(data, bytes):
-            md5.update(data)
-        elif hasattr(data, 'read'):
-            pos = 0
-            try:
-                pos = data.tell()
-            except:  # pylint: disable=bare-except
-                pass
-            for chunk in iter(lambda: data.read(4096), b""):
-                md5.update(chunk)
-            try:
-                data.seek(pos, SEEK_SET)
-            except (AttributeError, IOError):
-                raise ValueError("Data should be bytes or a seekable file-like object.")
-        else:
-            raise ValueError("Data should be bytes or a seekable file-like object.")
-
-        return md5.digest()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        validate_content = request.context.options.pop('validate_content', False)
-        if validate_content and request.http_request.method != 'GET':
-            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
-            request.http_request.headers[self.header_name] = computed_md5
-            request.context['validate_content_md5'] = computed_md5
-        request.context['validate_content'] = validate_content
-
-    def on_response(self, request, response):
-        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
-            computed_md5 = request.context.get('validate_content_md5') or \
-                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
-            if response.http_response.headers['content-md5'] != computed_md5:
-                raise AzureError(
-                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
-                        response.http_response.headers['content-md5'], computed_md5),
-                    response=response.http_response
-                )
-
-
-class StorageRetryPolicy(HTTPPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    def __init__(self, **kwargs):
-        self.total_retries = kwargs.pop('retry_total', 10)
-        self.connect_retries = kwargs.pop('retry_connect', 3)
-        self.read_retries = kwargs.pop('retry_read', 3)
-        self.status_retries = kwargs.pop('retry_status', 3)
-        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
-        super(StorageRetryPolicy, self).__init__()
-
-    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
-        """
-        A function which sets the next host location on the request, if applicable.
-
-        :param ~azure.storage.models.RetryContext context:
-            The retry context containing the previous host location and the request
-            to evaluate and possibly modify.
-        """
-        if settings['hosts'] and all(settings['hosts'].values()):
-            url = urlparse(request.url)
-            # If there's more than one possible location, retry to the alternative
-            if settings['mode'] == LocationMode.PRIMARY:
-                settings['mode'] = LocationMode.SECONDARY
-            else:
-                settings['mode'] = LocationMode.PRIMARY
-            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
-            request.url = updated.geturl()
-
-    def configure_retries(self, request):  # pylint: disable=no-self-use
-        body_position = None
-        if hasattr(request.http_request.body, 'read'):
-            try:
-                body_position = request.http_request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-        options = request.context.options
-        return {
-            'total': options.pop("retry_total", self.total_retries),
-            'connect': options.pop("retry_connect", self.connect_retries),
-            'read': options.pop("retry_read", self.read_retries),
-            'status': options.pop("retry_status", self.status_retries),
-            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
-            'mode': options.pop("location_mode", LocationMode.PRIMARY),
-            'hosts': options.pop("hosts", None),
-            'hook': options.pop("retry_hook", None),
-            'body_position': body_position,
-            'count': 0,
-            'history': []
-        }
-
-    def get_backoff_time(self, settings):  # pylint: disable=unused-argument,no-self-use
-        """Formula for computing the current backoff interval.
-        Should be overridden by the child class to implement its own formula.
-
-        :rtype: float
-        """
-        return 0
-
-    def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        transport.sleep(backoff)
-
-    def increment(self, settings, request, response=None, error=None):
-        """Increment the retry counters.
-
-        :param response: A pipeline response object.
-        :param error: An error encountered during the request, or
-            None if the response was received successfully.
-
-        :return: Whether another retry attempt should be made (i.e. retries are not yet exhausted).
-        """
-        settings['total'] -= 1
-
-        if error and isinstance(error, ServiceRequestError):
-            # Errors when we're fairly sure that the server did not receive the
-            # request, so it should be safe to retry.
-            settings['connect'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        elif error and isinstance(error, ServiceResponseError):
-            # Errors that occur after the request has been started, so we should
-            # assume that the server began processing it.
-            settings['read'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # the status_forcelist and the given method is in the whitelist
-            if response:
-                settings['status'] -= 1
-                settings['history'].append(RequestHistory(request, http_response=response))
-
-        if not is_exhausted(settings):
-            if request.method not in ['PUT'] and settings['retry_secondary']:
-                self._set_next_host_location(settings, request)
-
-            # rewind the request body if it is a stream
-            if request.body and hasattr(request.body, 'read'):
-                # if no position was saved, retries will not work
-                if settings['body_position'] is None:
-                    return False
-                try:
-                    # attempt to rewind the body to the initial position
-                    request.body.seek(settings['body_position'], SEEK_SET)
-                except (UnsupportedOperation, ValueError):
-                    # if body is not seekable, then retry would not work
-                    return False
-            settings['count'] += 1
-            return True
-        return False
-
-    def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(StorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries occur after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(StorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
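
The backoff arithmetic in the deleted ``ExponentialRetry.get_backoff_time`` is easy
to misread: the increment is ``increment_base ** count`` (exponential), not
``increment_base * count``, and the jitter window is clamped so it never drops below
zero. A standalone sketch of the same schedule, with the parameter defaults copied
from the removed class::

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3,
                            random_jitter_range=3):
        # First retry waits initial_backoff; retry N waits
        # initial_backoff + increment_base**N, jittered by +/- the jitter range.
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        lower = backoff - random_jitter_range if backoff > random_jitter_range else 0
        return random.uniform(lower, backoff + random_jitter_range)

    # Nominal (pre-jitter) waits for the default three retries: 15, 18, 24.
    assert [15 + (0 if c == 0 else 3 ** c) for c in range(3)] == [15, 18, 24]
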
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/policies_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,219 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import random
-import logging
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline.policies import AsyncHTTPPolicy
-from azure.core.exceptions import AzureError
-
-from .policies import is_retry, StorageRetryPolicy
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-async def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        if asyncio.iscoroutine(settings['hook']):
-            await settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-        else:
-            settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-
-
-class AsyncStorageResponseHook(AsyncHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(AsyncStorageResponseHook, self).__init__()
-
-    async def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = await self.next.send(request)
-        await response.http_response.load_body()
-
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            if asyncio.iscoroutine(response_callback):
-                await response_callback(response)
-            else:
-                response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-class AsyncStorageRetryPolicy(StorageRetryPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    async def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        await transport.sleep(backoff)
-
-    async def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = await self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        await retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        await self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    await retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    await self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(AsyncStorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries occur after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(AsyncStorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
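
One subtlety in the async hook dispatch deleted above: ``asyncio.iscoroutine`` is
true for coroutine *objects*, not for ``async def`` functions, so testing the
callback itself this way takes the synchronous branch and the resulting coroutine is
never awaited. ``asyncio.iscoroutinefunction`` is the check that matches async
callables; a minimal sketch of that dispatch pattern (the names here are
illustrative, not from the package)::

    import asyncio

    async def invoke_hook(hook, **kwargs):
        # Await the callback only when it is an async callable.
        if asyncio.iscoroutinefunction(hook):
            await hook(**kwargs)
        else:
            hook(**kwargs)

    async def _demo():
        def sync_hook(retry_count, location_mode):
            print("sync hook:", retry_count, location_mode)

        async def async_hook(retry_count, location_mode):
            print("async hook:", retry_count, location_mode)

        await invoke_hook(sync_hook, retry_count=0, location_mode="primary")
        await invoke_hook(async_hook, retry_count=0, location_mode="primary")

    asyncio.run(_demo())
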
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/request_handlers.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/request_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/request_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/request_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,147 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-
-import logging
-from os import fstat
-from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
-
-import isodate
-
-from azure.core.exceptions import raise_with_traceback
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def serialize_iso(attr):
-    """Serialize Datetime object into ISO-8601 formatted string.
-
-    :param Datetime attr: Object to be serialized.
-    :rtype: str
-    :raises: ValueError if format invalid.
-    """
-    if not attr:
-        return None
-    if isinstance(attr, str):
-        attr = isodate.parse_datetime(attr)
-    try:
-        utc = attr.utctimetuple()
-        if utc.tm_year > 9999 or utc.tm_year < 1:
-            raise OverflowError("Hit max or min date")
-
-        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
-            utc.tm_year, utc.tm_mon, utc.tm_mday,
-            utc.tm_hour, utc.tm_min, utc.tm_sec)
-        return date + 'Z'
-    except (ValueError, OverflowError) as err:
-        msg = "Unable to serialize datetime object."
-        raise_with_traceback(ValueError, msg, err)
-    except AttributeError as err:
-        msg = "ISO-8601 object must be a valid Datetime object."
-        raise_with_traceback(TypeError, msg, err)
-
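serialize_iso truncates to whole seconds and always appends 'Z'. A hedged equivalent using only the standard library (the isodate dependency above is needed only for the string-parsing branch):

```python
from datetime import datetime, timezone

def to_iso_z(dt):
    # Convert to UTC, drop sub-second precision, and append 'Z',
    # mirroring the formatting branch of serialize_iso above.
    utc = dt.utctimetuple()
    return "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z".format(
        utc.tm_year, utc.tm_mon, utc.tm_mday,
        utc.tm_hour, utc.tm_min, utc.tm_sec)

print(to_iso_z(datetime(2020, 1, 2, 3, 4, 5, 999999, tzinfo=timezone.utc)))
# -> 2020-01-02T03:04:05Z
```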
-
-def get_length(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:  # pylint: disable=bare-except
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            try:
-                return fstat(fileno).st_size
-            except OSError:
-                # Not a valid fileno; 'requests' may possibly have
-                # returned a socket number instead.
-                pass
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
-
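get_length probes three strategies in order: len(), the file descriptor, then seek/tell. Each probe in isolation (illustrative values; a BytesIO fails the len() probe and falls through to seek/tell):

```python
import io
import os
import tempfile

print(len(b"hello"))                        # 1) bytes/bytearray support len()

with tempfile.TemporaryFile() as f:         # 2) real files: fstat on the fd
    f.write(b"hello world")
    f.flush()
    print(os.fstat(f.fileno()).st_size)     # 11

buf = io.BytesIO(b"hello world")            # 3) seekable streams: seek/tell
buf.seek(3)
pos = buf.tell()
buf.seek(0, io.SEEK_END)
print(buf.tell() - pos)                     # 8 bytes remain past position 3
buf.seek(pos, io.SEEK_SET)                  # restore position, as above
```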
-
-def read_length(data):
-    try:
-        if hasattr(data, 'read'):
-            read_data = b''
-            for chunk in iter(lambda: data.read(4096), b""):
-                read_data += chunk
-            return len(read_data), read_data
-        if hasattr(data, '__iter__'):
-            read_data = b''
-            for chunk in data:
-                read_data += chunk
-            return len(read_data), read_data
-    except:  # pylint: disable=bare-except
-        pass
-    raise ValueError("Unable to calculate content length, please specify.")
-
-
-def validate_and_format_range_headers(
-        start_range, end_range, start_range_required=True,
-        end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if (start_range_required or end_range is not None) and start_range is None:
-        raise ValueError("start_range value cannot be None.")
-    if end_range_required and end_range is None:
-        raise ValueError("end_range value cannot be None.")
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError("Invalid page blob start_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(start_range))
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError("Invalid page blob end_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(end_range))
-
-    # Format based on whether end_range is present
-    range_header = None
-    if end_range is not None:
-        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        range_header = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    range_validation = None
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError("Both start and end range required for MD5 content validation.")
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
-        range_validation = 'true'
-
-    return range_header, range_validation
-
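A quick check of the formatting rules above, with made-up offsets:

```python
# Closed range (both ends given) -- also a valid 512-aligned page range:
print('bytes={0}-{1}'.format(0, 511))      # bytes=0-511

# Open-ended range (no end given):
print('bytes={0}-'.format(1024))           # bytes=1024-

# MD5 validation needs a closed range spanning at most 4 MiB:
start, end = 0, 4 * 1024 * 1024
assert end - start <= 4 * 1024 * 1024      # boundary case: still allowed
```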
-
-def add_metadata_headers(metadata=None):
-    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
-    headers = {}
-    if metadata:
-        for key, value in metadata.items():
-            headers['x-ms-meta-{}'.format(key)] = value
-    return headers
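add_metadata_headers is a thin prefixing helper; the equivalent dict comprehension, with made-up metadata:

```python
metadata = {'project': 'demo', 'owner': 'alice'}
headers = {'x-ms-meta-{}'.format(k): v for k, v in metadata.items()}
print(headers)   # {'x-ms-meta-project': 'demo', 'x-ms-meta-owner': 'alice'}
```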
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/response_handlers.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/response_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/response_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/response_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.exceptions import (
-    HttpResponseError,
-    ResourceNotFoundError,
-    ResourceModifiedError,
-    ResourceExistsError,
-    ClientAuthenticationError,
-    DecodeError)
-
-from .parser import _to_utc_datetime
-from .models import StorageErrorCode, UserDelegationKey, get_enum_value
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.exceptions import AzureError
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-class PartialBatchErrorException(HttpResponseError):
-    """There is a partial failure in batch operations.
-
-    :param str message: The message of the exception.
-    :param response: Server response to be deserialized.
-    :param list parts: A list of the parts in multipart response.
-    """
-
-    def __init__(self, message, response, parts):
-        self.parts = parts
-        super(PartialBatchErrorException, self).__init__(message=message, response=response)
-
-
-def parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
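Worked example of the parse above (header value shape per RFC 7233):

```python
content_range = 'bytes 1-3/65537'                 # "unit first-last/complete"
after_space = content_range.split(' ', 1)[1]      # '1-3/65537'
total = int(after_space.split('/', 1)[1])         # 65537
print(total)   # the complete blob length, not the size of the returned range
```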
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers)
-
-
-def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers), deserialized
-
-
-def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return response.location_mode, deserialized
-
-
-def process_storage_error(storage_error):
-    raise_error = HttpResponseError
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        if error_body:
-            for info in error_body.iter():
-                if info.tag.lower() == 'code':
-                    error_code = info.text
-                elif info.tag.lower() == 'message':
-                    error_message = info.text
-                else:
-                    additional_data[info.tag] = info.text
-    except DecodeError:
-        pass
-
-    try:
-        if error_code:
-            error_code = StorageErrorCode(error_code)
-            if error_code in [StorageErrorCode.condition_not_met,
-                              StorageErrorCode.blob_overwritten]:
-                raise_error = ResourceModifiedError
-            if error_code in [StorageErrorCode.invalid_authentication_info,
-                              StorageErrorCode.authentication_failed]:
-                raise_error = ClientAuthenticationError
-            if error_code in [StorageErrorCode.resource_not_found,
-                              StorageErrorCode.cannot_verify_copy_source,
-                              StorageErrorCode.blob_not_found,
-                              StorageErrorCode.queue_not_found,
-                              StorageErrorCode.container_not_found,
-                              StorageErrorCode.parent_not_found,
-                              StorageErrorCode.share_not_found]:
-                raise_error = ResourceNotFoundError
-            if error_code in [StorageErrorCode.account_already_exists,
-                              StorageErrorCode.account_being_created,
-                              StorageErrorCode.resource_already_exists,
-                              StorageErrorCode.resource_type_mismatch,
-                              StorageErrorCode.blob_already_exists,
-                              StorageErrorCode.queue_already_exists,
-                              StorageErrorCode.container_already_exists,
-                              StorageErrorCode.container_being_deleted,
-                              StorageErrorCode.queue_being_deleted,
-                              StorageErrorCode.share_already_exists,
-                              StorageErrorCode.share_being_deleted]:
-                raise_error = ResourceExistsError
-    except ValueError:
-        # Got an unknown error code
-        pass
-
-    try:
-        error_message += "\nErrorCode:{}".format(error_code.value)
-    except AttributeError:
-        error_message += "\nErrorCode:{}".format(error_code)
-    for name, info in additional_data.items():
-        error_message += "\n{}:{}".format(name, info)
-
-    error = raise_error(message=error_message, response=storage_error.response)
-    error.error_code = error_code
-    error.additional_info = additional_data
-    raise error
-
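The dispatch above boils down to a code-to-exception lookup with a generic fallback, plus the error code appended to the message. A condensed, self-contained sketch (hypothetical exception names, not the azure.core types):

```python
class HttpError(Exception): pass
class NotFoundError(HttpError): pass
class ExistsError(HttpError): pass

ERROR_MAP = {
    'BlobNotFound': NotFoundError,
    'ContainerNotFound': NotFoundError,
    'BlobAlreadyExists': ExistsError,
}

def translate(code, message):
    exc_type = ERROR_MAP.get(code, HttpError)       # unknown codes stay generic
    return exc_type("{}\nErrorCode:{}".format(message, code))

print(type(translate('BlobNotFound', 'boom')).__name__)    # NotFoundError
print(type(translate('SomethingNew', 'boom')).__name__)    # HttpError
```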
-
-def parse_to_internal_user_delegation_key(service_user_delegation_key):
-    internal_user_delegation_key = UserDelegationKey()
-    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
-    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
-    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
-    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
-    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
-    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
-    internal_user_delegation_key.value = service_user_delegation_key.value
-    return internal_user_delegation_key
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,209 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from datetime import date
-
-from .parser import _str, _to_utc_datetime
-from .constants import X_MS_VERSION
-from . import sign_string, url_quote
-
-
-class QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-
-    @staticmethod
-    def to_list():
-        return [
-            QueryStringConstants.SIGNED_SIGNATURE,
-            QueryStringConstants.SIGNED_PERMISSION,
-            QueryStringConstants.SIGNED_START,
-            QueryStringConstants.SIGNED_EXPIRY,
-            QueryStringConstants.SIGNED_RESOURCE,
-            QueryStringConstants.SIGNED_IDENTIFIER,
-            QueryStringConstants.SIGNED_IP,
-            QueryStringConstants.SIGNED_PROTOCOL,
-            QueryStringConstants.SIGNED_VERSION,
-            QueryStringConstants.SIGNED_CACHE_CONTROL,
-            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
-            QueryStringConstants.SIGNED_CONTENT_ENCODING,
-            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
-            QueryStringConstants.SIGNED_CONTENT_TYPE,
-            QueryStringConstants.START_PK,
-            QueryStringConstants.START_RK,
-            QueryStringConstants.END_PK,
-            QueryStringConstants.END_RK,
-            QueryStringConstants.SIGNED_RESOURCE_TYPES,
-            QueryStringConstants.SIGNED_SERVICES,
-            QueryStringConstants.SIGNED_OID,
-            QueryStringConstants.SIGNED_TID,
-            QueryStringConstants.SIGNED_KEY_START,
-            QueryStringConstants.SIGNED_KEY_EXPIRY,
-            QueryStringConstants.SIGNED_KEY_SERVICE,
-            QueryStringConstants.SIGNED_KEY_VERSION,
-        ]
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account access
-    signature tokens with an account name and account key. Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service
-        or to create a new account object.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account
-            SAS. You can combine values to provide access to more than one
-            resource type.
-        :param AccountSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy. You can combine
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the account SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(QueryStringConstants.SIGNED_START, start)
-        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, policy_id):
-        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
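For reference, the account SAS string-to-sign built above is a sequence of newline-terminated field values signed with HMAC-SHA256 under the base64-decoded account key (the documented account SAS scheme; sign_string itself lives in the deleted _shared package). A standalone sketch with a dummy key and hypothetical field values:

```python
import base64
import hashlib
import hmac
from urllib.parse import quote

def sign(account_key_b64, string_to_sign):
    key = base64.b64decode(account_key_b64)
    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

fields = {'sp': 'rl', 'ss': 'b', 'srt': 'co',
          'se': '2021-01-01T00:00:00Z', 'sv': '2019-07-07'}
# account name, then sp, ss, srt, st, se, sip, spr, sv -- each newline-terminated.
string_to_sign = 'myaccount\n' + '\n'.join(
    fields.get(k, '') for k in ('sp', 'ss', 'srt', 'st', 'se', 'sip', 'spr', 'sv')) + '\n'
fields['sig'] = sign(base64.b64encode(b'not-a-real-key').decode(), string_to_sign)
print('&'.join('{0}={1}'.format(k, quote(v)) for k, v in fields.items()))
```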
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,548 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from concurrent import futures
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from threading import Lock
-from itertools import islice
-from math import ceil
-
-import six
-
-from azure.core.tracing.common import with_current_context
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
-
-
-def _parallel_uploads(executor, uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(executor.submit(with_current_context(uploader), next_chunk))
-
-    # Wait for the remaining uploads to finish
-    done, _running = futures.wait(running)
-    range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
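_parallel_uploads keeps at most max_concurrency futures in flight, topping the window back up as tasks complete. A similar sliding-window pattern in a self-contained sketch (toy work function, no tracing context):

```python
from concurrent import futures
from itertools import islice

def run_windowed(work, items, max_concurrency):
    pending = iter(items)
    results = []
    with futures.ThreadPoolExecutor(max_concurrency) as executor:
        # Prime the window, then refill one-for-one as tasks finish.
        running = {executor.submit(work, i) for i in islice(pending, max_concurrency)}
        while running:
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            results.extend(f.result() for f in done)
            for item in islice(pending, len(done)):
                running.add(executor.submit(work, item))
    return results

print(sorted(run_windowed(lambda x: x * x, range(10), max_concurrency=3)))
```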
-
-def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        validate_content=validate_content,
-        **kwargs)
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_chunk), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_substream_block), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b""
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError("Blob data should be of type bytes.")
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b"" or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
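The inner loop above buffers partial reads until a full chunk is assembled or the stream ends. The same buffering idea, stripped of padding and encryption (illustrative helper, not SDK code):

```python
import io

def iter_chunks(stream, chunk_size):
    # Accumulate reads until a full chunk is built or the stream is
    # exhausted, yielding (offset, data) pairs like get_chunk_streams.
    index = 0
    while True:
        data = b""
        while len(data) < chunk_size:
            temp = stream.read(chunk_size - len(data))
            if not temp:
                break
            data += temp
        if data:
            yield index, data
        if len(data) < chunk_size:
            break
        index += len(data)

print(list(iter_chunks(io.BytesIO(b"abcdefg"), 3)))
# [(0, b'abc'), (3, b'def'), (6, b'g')]
```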
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_lock is not None:
-            with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
-
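Worked numbers for the block arithmetic above (hypothetical sizes):

```python
from math import ceil

blob_length, chunk_size = 10_000_000, 4 * 1024 * 1024
blocks = int(ceil(blob_length / (chunk_size * 1.0)))              # 3
last = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
for i in range(blocks):
    length = last if i == blocks - 1 else chunk_size
    print('BlockId{:05d}'.format(i), i * chunk_size, length)
# BlockId00000 0 4194304
# BlockId00001 4194304 4194304
# BlockId00002 8388608 1611392
```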
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop("modified_access_conditions", None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return index, block_id
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        return not any(bytearray(chunk_data))
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-            self.current_length = int(self.response_headers["blob_append_offset"])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        response = self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
-
-
-class SubStream(IOBase):
-
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derivations of io.IOBase and thus do not implement seekable().
-        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # only the main thread runs this, so there's no need to grab the lock
-            wrapped_stream.seek(0, SEEK_CUR)
-        except:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = (
-            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        )
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-        super(SubStream, self).__init__()
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, size=None):
-        if self.closed:  # pylint: disable=using-constant-test
-            raise ValueError("Stream is closed.")
-
-        if size is None:
-            size = self._length - self._position
-
-        # adjust if out of bounds
-        if size + self._position >= self._length:
-            size = self._length - self._position
-
-        # return fast
-        if size == 0 or self._buffer.closed:
-            return b""
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(size)
-        bytes_read = len(read_buffer)
-        bytes_remaining = size - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_concurrency > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence is SEEK_SET:
-            start_index = 0
-        elif whence is SEEK_CUR:
-            start_index = self._position
-        elif whence is SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self):
-        raise UnsupportedOperation
-
-    def writelines(self):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
-
-
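SubStream gives each worker a bounded view over one shared seekable stream, with a lock making the seek-then-read pair atomic. The core idea in a deliberately simplified sketch (hypothetical class, no internal read buffer):

```python
import io
from io import SEEK_SET
from threading import Lock

class WindowReader:
    # Simplified stand-in for SubStream: a fixed [begin, begin+length) view.
    def __init__(self, stream, begin, length, lock):
        self._stream, self._begin, self._length = stream, begin, length
        self._pos, self._lock = 0, lock

    def read(self, size=None):
        remaining = self._length - self._pos
        size = remaining if size is None else min(size, remaining)
        with self._lock:                 # seek+read must be atomic across workers
            self._stream.seek(self._begin + self._pos, SEEK_SET)
            data = self._stream.read(size)
        self._pos += len(data)
        return data

shared, lock = io.BytesIO(b"abcdefghij"), Lock()
print(WindowReader(shared, 0, 5, lock).read())   # b'abcde'
print(WindowReader(shared, 5, 5, lock).read())   # b'fghij'
```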
-class IterStreamer(object):
-    """
-    File-like streaming iterator.
-    """
-
-    def __init__(self, generator, encoding="UTF-8"):
-        self.generator = generator
-        self.iterator = iter(generator)
-        self.leftover = b""
-        self.encoding = encoding
-
-    def __len__(self):
-        return self.generator.__len__()
-
-    def __iter__(self):
-        return self.iterator
-
-    def seekable(self):
-        return False
-
-    def next(self):
-        return next(self.iterator)
-
-    def tell(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator does not support tell.")
-
-    def seek(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator is unseekable.")
-
-    def read(self, size):
-        data = self.leftover
-        count = len(self.leftover)
-        try:
-            while count < size:
-                chunk = self.next()
-                if isinstance(chunk, six.text_type):
-                    chunk = chunk.encode(self.encoding)
-                data += chunk
-                count += len(chunk)
-        except StopIteration:
-            # Clear the leftover so an exhausted iterator cannot be re-served.
-            self.leftover = b""
-
-        if count >= size:
-            self.leftover = data[size:]
-
-        return data[:size]
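Illustrative usage of the iterator wrapper above, with the leftover-handling fix applied (mixed bytes/str chunks to exercise the encoding branch):

```python
def chunks():
    yield b"hello "
    yield "world"              # str chunks get encoded with self.encoding

stream = IterStreamer(chunks())
print(stream.read(8))          # b'hello wo'
print(stream.read(8))          # b'rld' -- leftover served, iterator exhausted
print(stream.read(8))          # b''
```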
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared/uploads_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,350 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-import asyncio
-from asyncio import Lock
-from itertools import islice
-import threading
-
-from math import ceil
-
-import six
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-
-
-async def _parallel_uploads(uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(asyncio.ensure_future(uploader(next_chunk)))
-
-    # Wait for the remaining uploads to finish
-    if running:
-        done, _running = await asyncio.wait(running)
-        range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
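This is the asyncio mirror of the thread-pool window sketched earlier: tasks replace futures, and an awaited wait replaces the blocking one. Condensed sketch (toy coroutine in place of process_chunk):

```python
import asyncio
from itertools import islice

async def run_windowed(work, items, max_concurrency):
    pending = iter(items)
    results = []
    running = {asyncio.ensure_future(work(i)) for i in islice(pending, max_concurrency)}
    while running:
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        results.extend(t.result() for t in done)
        for item in islice(pending, len(done)):      # refill the window
            running.add(asyncio.ensure_future(work(item)))
    return results

async def square(x):
    await asyncio.sleep(0)       # stand-in for the real chunk upload
    return x * x

print(sorted(asyncio.run(run_windowed(square, range(10), 3))))
```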
-
-async def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_chunk(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for chunk in uploader.get_chunk_streams():
-            range_ids.append(await uploader.process_chunk(chunk))
-
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-async def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_substream_block(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for block in uploader.get_substream_blocks():
-            range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    async def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    async def _update_progress(self, length):
-        if self.progress_lock is not None:
-            async with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = await self._upload_chunk(chunk_offset, chunk_data)
-        await self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
-
-    async def process_substream_block(self, block_data):
-        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
-        await self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop('modified_access_conditions', None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        await self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options)
-        return index, block_id
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        try:
-            await self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        for each_byte in chunk_data:
-            if each_byte not in [0, b'\x00']:
-                return False
-        return True
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = await self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-            self.current_length = int(self.response_headers['blob_append_offset'])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        # Pass the range length (not the end offset), matching the synchronous uploader.
-        response = await self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-        return range_id, response
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,571 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, TYPE_CHECKING
-)
-
-from ._shared import sign_string, url_quote
-from ._shared.constants import X_MS_VERSION
-from ._shared.models import Services
-from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \
-    QueryStringConstants
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from . import (
-        ResourceTypes,
-        AccountSasPermissions,
-        UserDelegationKey,
-        ContainerSasPermissions,
-        BlobSasPermissions
-    )
-
-class BlobQueryStringConstants(object):
-    SIGNED_TIMESTAMP = 'snapshot'
-
-
-class BlobSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating blob and container access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key=None, user_delegation_key=None):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-        The access key used to generate the shared access signatures.
-        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
-            Instead of an account key, the user could pass in a user delegation key.
-            A user delegation key can be obtained from the service by authenticating with an AAD identity;
-            this can be accomplished by calling get_user_delegation_key on any Blob service object.
-        '''
-        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-        self.user_delegation_key = user_delegation_key
-
-    def generate_blob(self, container_name, blob_name, snapshot=None, permission=None,
-                      expiry=None, start=None, policy_id=None, ip=None, protocol=None,
-                      cache_control=None, content_disposition=None,
-                      content_encoding=None, content_language=None,
-                      content_type=None):
-        '''
-        Generates a shared access signature for the blob or one of its snapshots.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param str blob_name:
-            Name of blob.
-        :param str snapshot:
-            The snapshot parameter is an opaque DateTime value that,
-            when present, specifies the blob snapshot for which to grant permission.
-        :param BlobSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str policy_id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        resource_path = container_name + '/' + blob_name
-
-        sas = _BlobSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(policy_id)
-        sas.add_resource('b' if snapshot is None else 'bs')
-        sas.add_timestamp(snapshot)
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, resource_path,
-                                   user_delegation_key=self.user_delegation_key)
-
-        return sas.get_token()
-
-    def generate_container(self, container_name, permission=None, expiry=None,
-                           start=None, policy_id=None, ip=None, protocol=None,
-                           cache_control=None, content_disposition=None,
-                           content_encoding=None, content_language=None,
-                           content_type=None):
-        '''
-        Generates a shared access signature for the container.
-        Use the returned signature with the sas_token parameter of any BlobService.
-
-        :param str container_name:
-            Name of container.
-        :param ContainerSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str policy_id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_blob_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _BlobSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(policy_id)
-        sas.add_resource('c')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, container_name,
-                                   user_delegation_key=self.user_delegation_key)
-        return sas.get_token()
-
-
-class _BlobSharedAccessHelper(_SharedAccessHelper):
-
-    def add_timestamp(self, timestamp):
-        self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp)
-
-    def get_value_to_append(self, query):
-        return_value = self.query_dict.get(query) or ''
-        return return_value + '\n'
-
-    def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None):
-        # pylint: disable = no-member
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/blob/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_START) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource)
-
-        if user_delegation_key is not None:
-            self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid)
-            self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid)
-            self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start)
-            self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry)
-            self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service)
-            self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version)
-
-            string_to_sign += \
-                (self.get_value_to_append(QueryStringConstants.SIGNED_OID) +
-                 self.get_value_to_append(QueryStringConstants.SIGNED_TID) +
-                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) +
-                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) +
-                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) +
-                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION))
-        else:
-            string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER)
-
-        string_to_sign += \
-            (self.get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) +
-             self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key if user_delegation_key is None else user_delegation_key.value,
-                                    string_to_sign))
-
-    def get_token(self):
-        # a conscious decision was made to exclude the timestamp in the generated token
-        # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp
-        exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP]
-        return '&'.join(['{0}={1}'.format(n, url_quote(v))
-                         for n, v in self.query_dict.items() if v is not None and n not in exclude])
-
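The string-to-sign assembled above is signed with the account key (or, on the user-delegation path, with the delegation key's value) via the `sign_string` helper imported from `._shared` at the top of this file. A self-contained sketch of that signing primitive, assuming the standard Azure Storage scheme (HMAC-SHA256 over the base64-decoded key); this is illustrative, not the SDK module itself:

import base64
import hashlib
import hmac

def sign_string(key: str, string_to_sign: str) -> str:
    # Storage keys are base64-encoded; the signature is a base64 HMAC-SHA256
    digest = hmac.new(base64.b64decode(key),
                      string_to_sign.encode("utf-8"),
                      hashlib.sha256).digest()
    return base64.b64encode(digest).decode("utf-8")
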
-
-def generate_account_sas(
-        account_name,  # type: str
-        account_key,  # type: str
-        resource_types,  # type: Union[ResourceTypes, str]
-        permission,  # type: Union[AccountSasPermissions, str]
-        expiry,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):  # type: (...) -> str
-    """Generates a shared access signature for the blob service.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param resource_types:
-        Specifies the resource types that are accessible with the account SAS.
-    :type resource_types: str or ~azure.storage.blob.ResourceTypes
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.AccountSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_authentication.py
-            :start-after: [START create_sas_token]
-            :end-before: [END create_sas_token]
-            :language: python
-            :dedent: 8
-            :caption: Generating a shared access signature.
-    """
-    sas = SharedAccessSignature(account_name, account_key)
-    return sas.generate_account(
-        services=Services(blob=True),
-        resource_types=resource_types,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs
-    ) # type: ignore
-
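A minimal usage sketch for `generate_account_sas` above; the account name, key, and permission strings are placeholders:

from datetime import datetime, timedelta

sas_token = generate_account_sas(
    account_name="mystorageaccount",                # placeholder
    account_key="<base64-account-key>",             # placeholder secret
    resource_types="sco",                           # service + container + object
    permission="rl",                                # read + list
    expiry=datetime.utcnow() + timedelta(hours=1),
)
account_url = "https://mystorageaccount.blob.core.windows.net/?" + sas_token
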
-
-def generate_container_sas(
-        account_name,  # type: str
-        container_name,  # type: str
-        account_key=None,  # type: Optional[str]
-        user_delegation_key=None,  # type: Optional[UserDelegationKey]
-        permission=None,  # type: Optional[Union[ContainerSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        policy_id=None,  # type: Optional[str]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a container.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str container_name:
-        The name of the container.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-        Either `account_key` or `user_delegation_key` must be specified.
-    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
-        Instead of an account shared key, the user could pass in a user delegation key.
-        A user delegation key can be obtained from the service by authenticating with an AAD identity;
-        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
-        When present, the SAS is signed with the user delegation key instead.
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.ContainerSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str policy_id:
-        A unique value up to 64 characters in length that correlates to a
-        stored access policy. To create a stored access policy, use
-        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_containers.py
-            :start-after: [START generate_sas_token]
-            :end-before: [END generate_sas_token]
-            :language: python
-            :dedent: 12
-            :caption: Generating a sas token.
-    """
-    if not user_delegation_key and not account_key:
-        raise ValueError("Either user_delegation_key or account_key must be provided.")
-
-    if user_delegation_key:
-        sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
-    else:
-        sas = BlobSharedAccessSignature(account_name, account_key=account_key)
-    return sas.generate_container(
-        container_name,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        policy_id=policy_id,
-        ip=ip,
-        **kwargs
-    )
-
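A usage sketch for the container-level variant above. Per the guard at the top of the function, at least one of `account_key` or `user_delegation_key` must be supplied, and a user delegation key takes precedence; all values below are placeholders:

from datetime import datetime, timedelta

container_sas = generate_container_sas(
    account_name="mystorageaccount",                # placeholder
    container_name="mycontainer",                   # placeholder
    account_key="<base64-account-key>",             # or user_delegation_key=...
    permission="rl",
    expiry=datetime.utcnow() + timedelta(hours=1),
)
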
-
-def generate_blob_sas(
-        account_name,  # type: str
-        container_name,  # type: str
-        blob_name,  # type: str
-        snapshot=None,  # type: Optional[str]
-        account_key=None,  # type: Optional[str]
-        user_delegation_key=None,  # type: Optional[UserDelegationKey]
-        permission=None,  # type: Optional[Union[BlobSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        policy_id=None,  # type: Optional[str]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a blob.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str container_name:
-        The name of the container.
-    :param str blob_name:
-        The name of the blob.
-    :param str snapshot:
-        An optional blob snapshot ID.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-        Either `account_key` or `user_delegation_key` must be specified.
-    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
-        Instead of an account shared key, the user could pass in a user delegation key.
-        A user delegation key can be obtained from the service by authenticating with an AAD identity;
-        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
-        When present, the SAS is signed with the user delegation key instead.
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.BlobSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str policy_id:
-        A unique value up to 64 characters in length that correlates to a
-        stored access policy. To create a stored access policy, use
-        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-    """
-    if not user_delegation_key and not account_key:
-        raise ValueError("Either user_delegation_key or account_key must be provided.")
-
-    if user_delegation_key:
-        sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
-    else:
-        sas = BlobSharedAccessSignature(account_name, account_key=account_key)
-    return sas.generate_blob(
-        container_name,
-        blob_name,
-        snapshot=snapshot,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        policy_id=policy_id,
-        ip=ip,
-        **kwargs
-    )
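And the blob-level counterpart, again with placeholder names; the resulting token is appended to the blob URL as its query string:

from datetime import datetime, timedelta

blob_sas = generate_blob_sas(
    account_name="mystorageaccount",                # placeholder
    container_name="mycontainer",                   # placeholder
    blob_name="report.csv",                         # placeholder
    account_key="<base64-account-key>",
    permission="r",
    expiry=datetime.utcnow() + timedelta(minutes=30),
)
blob_url = ("https://mystorageaccount.blob.core.windows.net/"
            "mycontainer/report.csv?" + blob_sas)
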
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_upload_helpers.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_upload_helpers.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_upload_helpers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_upload_helpers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,281 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from io import SEEK_SET, UnsupportedOperation
-from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import
-
-import six
-from azure.core.exceptions import ResourceExistsError, ResourceModifiedError
-
-from ._shared.response_handlers import (
-    process_storage_error,
-    return_response_headers)
-from ._shared.models import StorageErrorCode
-from ._shared.uploads import (
-    upload_data_chunks,
-    upload_substream_blocks,
-    BlockBlobChunkUploader,
-    PageBlobChunkUploader,
-    AppendBlobChunkUploader)
-from ._shared.encryption import generate_blob_encryption_data, encrypt_blob
-from ._generated.models import (
-    StorageErrorException,
-    BlockLookupList,
-    AppendPositionAccessConditions,
-    ModifiedAccessConditions,
-)
-
-if TYPE_CHECKING:
-    from datetime import datetime # pylint: disable=unused-import
-    BlobLeaseClient = TypeVar("BlobLeaseClient")
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-
-
-def _convert_mod_error(error):
-    message = error.message.replace(
-        "The condition specified using HTTP conditional header(s) is not met.",
-        "The specified blob already exists.")
-    message = message.replace("ConditionNotMet", "BlobAlreadyExists")
-    overwrite_error = ResourceExistsError(
-        message=message,
-        response=error.response,
-        error=error)
-    overwrite_error.error_code = StorageErrorCode.blob_already_exists
-    raise overwrite_error
-
-
-def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
-    if modified_access_conditions is None:
-        return False
-    return any([
-        modified_access_conditions.if_modified_since,
-        modified_access_conditions.if_unmodified_since,
-        modified_access_conditions.if_none_match,
-        modified_access_conditions.if_match
-    ])
-
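Taken together, `_convert_mod_error` and `_any_conditions` implement the `overwrite=False` contract: if the caller supplied no access conditions, `If-None-Match: *` is injected so the service rejects the request when the blob already exists, and the resulting 412 is surfaced as a friendlier ResourceExistsError. A condensed, self-contained sketch of that flow (the class is a stand-in, not the SDK's ModifiedAccessConditions):

class FakeConditions:
    # stand-in with no conditions set
    if_modified_since = if_unmodified_since = if_none_match = if_match = None

conditions = FakeConditions()
overwrite = False
if not overwrite and not any([conditions.if_modified_since,
                              conditions.if_unmodified_since,
                              conditions.if_none_match,
                              conditions.if_match]):
    conditions.if_none_match = '*'  # service returns 412 if the blob exists
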
-
-def upload_block_blob(  # pylint: disable=too-many-locals
-        client=None,
-        data=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if not overwrite and not _any_conditions(**kwargs):
-            kwargs['modified_access_conditions'].if_none_match = '*'
-        adjusted_count = length
-        if (encryption_options.get('key') is not None) and (adjusted_count is not None):
-            adjusted_count += (16 - (length % 16))
-        blob_headers = kwargs.pop('blob_headers', None)
-        tier = kwargs.pop('standard_blob_tier', None)
-
-        # Do single put if the size is smaller than config.max_single_put_size
-        if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size):
-            try:
-                data = data.read(length)
-                if not isinstance(data, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-            except AttributeError:
-                pass
-            if encryption_options.get('key'):
-                encryption_data, data = encrypt_blob(data, encryption_options['key'])
-                headers['x-ms-meta-encryptiondata'] = encryption_data
-            return client.upload(
-                data,
-                content_length=adjusted_count,
-                blob_http_headers=blob_headers,
-                headers=headers,
-                cls=return_response_headers,
-                validate_content=validate_content,
-                data_stream_total=adjusted_count,
-                upload_stream_current=0,
-                tier=tier.value if tier else None,
-                **kwargs)
-
-        use_original_upload_path = blob_settings.use_byte_buffer or \
-            validate_content or encryption_options.get('required') or \
-            blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
-            hasattr(stream, 'seekable') and not stream.seekable() or \
-            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
-
-        if use_original_upload_path:
-            if encryption_options.get('key'):
-                cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key'])
-                headers['x-ms-meta-encryptiondata'] = encryption_data
-                encryption_options['cek'] = cek
-                encryption_options['vector'] = iv
-            block_ids = upload_data_chunks(
-                service=client,
-                uploader_class=BlockBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                max_concurrency=max_concurrency,
-                stream=stream,
-                validate_content=validate_content,
-                encryption_options=encryption_options,
-                **kwargs
-            )
-        else:
-            block_ids = upload_substream_blocks(
-                service=client,
-                uploader_class=BlockBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                max_concurrency=max_concurrency,
-                stream=stream,
-                validate_content=validate_content,
-                **kwargs
-            )
-
-        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
-        block_lookup.latest = block_ids
-        return client.commit_block_list(
-            block_lookup,
-            blob_http_headers=blob_headers,
-            cls=return_response_headers,
-            validate_content=validate_content,
-            headers=headers,
-            tier=tier.value if tier else None,
-            **kwargs)
-    except StorageErrorException as error:
-        try:
-            process_storage_error(error)
-        except ResourceModifiedError as mod_error:
-            if not overwrite:
-                _convert_mod_error(mod_error)
-            raise
-
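The size check at the top of `upload_block_blob` is the key routing decision: payloads below `max_single_put_size` go up in a single Put Blob call, while larger or unknown-length payloads are staged as blocks and committed with Put Block List. A hedged sketch of that routing, assuming the documented client default of 64MB:

def pick_block_blob_path(length, max_single_put_size=64 * 1024 * 1024):
    # an unknown length (None) always takes the chunked path
    if length is not None and length < max_single_put_size:
        return "single Put Blob"
    return "Put Block per chunk + Put Block List"

assert pick_block_blob_path(1024) == "single Put Blob"
assert pick_block_blob_path(None).startswith("Put Block")
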
-
-def upload_page_blob(
-        client=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if not overwrite and not _any_conditions(**kwargs):
-            kwargs['modified_access_conditions'].if_none_match = '*'
-        if length is None or length < 0:
-            raise ValueError("A content length must be specified for a Page Blob.")
-        if length % 512 != 0:
-            raise ValueError("Invalid page blob size: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(length))
-        if kwargs.get('premium_page_blob_tier'):
-            premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
-            try:
-                headers['x-ms-access-tier'] = premium_page_blob_tier.value
-            except AttributeError:
-                headers['x-ms-access-tier'] = premium_page_blob_tier
-        if encryption_options and encryption_options.get('data'):
-            headers['x-ms-meta-encryptiondata'] = encryption_options['data']
-        response = client.create(
-            content_length=0,
-            blob_content_length=length,
-            blob_sequence_number=None,
-            blob_http_headers=kwargs.pop('blob_headers', None),
-            cls=return_response_headers,
-            headers=headers,
-            **kwargs)
-        if length == 0:
-            return response
-
-        kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
-        return upload_data_chunks(
-            service=client,
-            uploader_class=PageBlobChunkUploader,
-            total_size=length,
-            chunk_size=blob_settings.max_page_size,
-            stream=stream,
-            max_concurrency=max_concurrency,
-            validate_content=validate_content,
-            encryption_options=encryption_options,
-            **kwargs)
-
-    except StorageErrorException as error:
-        try:
-            process_storage_error(error)
-        except ResourceModifiedError as mod_error:
-            if not overwrite:
-                _convert_mod_error(mod_error)
-            raise
-
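The validation above enforces the Page Blob contract: the total size must be known up front and must be a multiple of the 512-byte page size. A quick illustration of rounding a payload up to a valid page blob size (the helper name is illustrative, not an SDK function):

def round_up_to_page(length, page_size=512):
    # smallest multiple of page_size that can hold `length` bytes
    return ((length + page_size - 1) // page_size) * page_size

assert round_up_to_page(1) == 512
assert round_up_to_page(1024) == 1024  # already-aligned sizes are unchanged
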
-
-def upload_append_blob(  # pylint: disable=unused-argument
-        client=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if length == 0:
-            return {}
-        blob_headers = kwargs.pop('blob_headers', None)
-        append_conditions = AppendPositionAccessConditions(
-            max_size=kwargs.pop('maxsize_condition', None),
-            append_position=None)
-        try:
-            if overwrite:
-                client.create(
-                    content_length=0,
-                    blob_http_headers=blob_headers,
-                    headers=headers,
-                    **kwargs)
-            return upload_data_chunks(
-                service=client,
-                uploader_class=AppendBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                stream=stream,
-                max_concurrency=max_concurrency,
-                validate_content=validate_content,
-                append_position_access_conditions=append_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            if error.response.status_code != 404:
-                raise
-            # rewind the request body if it is a stream
-            if hasattr(stream, 'read'):
-                try:
-                    # attempt to rewind the body to the initial position
-                    stream.seek(0, SEEK_SET)
-                except UnsupportedOperation:
-                    # if body is not seekable, then retry would not work
-                    raise error
-            client.create(
-                content_length=0,
-                blob_http_headers=blob_headers,
-                headers=headers,
-                **kwargs)
-            return upload_data_chunks(
-                service=client,
-                uploader_class=AppendBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                stream=stream,
-                max_concurrency=max_concurrency,
-                validate_content=validate_content,
-                append_position_access_conditions=append_conditions,
-                **kwargs)
-    except StorageErrorException as error:
-        process_storage_error(error)
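The except-branch in `upload_append_blob` implements a create-then-retry fallback: if the first append fails with 404 (blob absent and overwrite not requested), the input stream is rewound to its start, the append blob is created, and the chunks are uploaded again. A standalone sketch of just the rewind step, mirroring the logic above:

from io import SEEK_SET, UnsupportedOperation

def rewind_or_raise(stream, original_error):
    # retrying is only safe if the body can be replayed from the beginning
    if hasattr(stream, 'read'):
        try:
            stream.seek(0, SEEK_SET)
        except UnsupportedOperation:
            raise original_error
    return stream
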
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_version.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_version.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/_version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-VERSION = "12.3.1"
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,137 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-
-from .._models import BlobType
-from .._shared.policies_async import ExponentialRetry, LinearRetry
-from ._blob_client_async import BlobClient
-from ._container_client_async import ContainerClient
-from ._blob_service_client_async import BlobServiceClient
-from ._lease_async import BlobLeaseClient
-from ._download_async import StorageStreamDownloader
-
-
-async def upload_blob_to_url(
-        blob_url,  # type: str
-        data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-        credential=None,  # type: Any
-        **kwargs):
-    # type: (...) -> dict[str, Any]
-    """Upload data to a given URL
-
-    The data will be uploaded as a block blob.
-
-    :param str blob_url:
-        The full URI to the blob. This can also include a SAS token.
-    :param data:
-        The data to upload. This can be bytes, text, an iterable or a file-like object.
-    :type data: bytes or str or Iterable
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        blob URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword bool overwrite:
-        Whether the blob to be uploaded should overwrite the current data.
-        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
-        operation will fail with a ResourceExistsError.
-    :keyword int max_concurrency:
-        The number of parallel connections with which to upload.
-    :keyword int length:
-        Number of bytes to read from the stream. This is optional, but
-        should be supplied for optimal performance.
-    :keyword dict(str,str) metadata:
-        Name-value pairs associated with the blob as metadata.
-    :keyword bool validate_content:
-        If true, calculates an MD5 hash for each chunk of the blob. The storage
-        service checks the hash of the content that has arrived with the hash
-        that was sent. This is primarily valuable for detecting bitflips on
-        the wire if using http instead of https, as https (the default) will
-        already validate. Note that this MD5 hash is not stored with the
-        blob. Also note that if enabled, the memory-efficient upload algorithm
-        will not be used, because computing the MD5 hash requires buffering
-        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-    :keyword str encoding:
-        Encoding to use if text is supplied as input. Defaults to UTF-8.
-    :returns: Blob-updated property dict (Etag and last modified)
-    :rtype: dict(str, Any)
-    """
-    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
-        return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
-
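A minimal async usage sketch for the convenience function above; the URL and SAS token are placeholders, and the credential argument is omitted because the URL already carries the SAS:

import asyncio

async def main():
    result = await upload_blob_to_url(
        "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>",
        b"hello, world",
        overwrite=True,
    )
    print(result.get('etag'))  # blob-updated property dict per the docstring

asyncio.run(main())
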
-
-async def _download_to_stream(client, handle, **kwargs):
-    """Download data to specified open file-handle."""
-    stream = await client.download_blob(**kwargs)
-    await stream.readinto(handle)
-
-
-async def download_blob_from_url(
-        blob_url,  # type: str
-        output,  # type: str
-        credential=None,  # type: Any
-        **kwargs):
-    # type: (...) -> None
-    """Download the contents of a blob to a local file or stream.
-
-    :param str blob_url:
-        The full URI to the blob. This can also include a SAS token.
-    :param output:
-        Where the data should be downloaded to. This could be either a file path to write to,
-        or an open IO handle to write to.
-    :type output: str or writable stream
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
-        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword bool overwrite:
-        Whether the local file should be overwritten if it already exists. The default value is
-        `False` - in which case a ValueError will be raised if the file already exists. If set to
-        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
-        in, this value is ignored.
-    :keyword int max_concurrency:
-        The number of parallel connections with which to download.
-    :keyword int offset:
-        Start of byte range to use for downloading a section of the blob.
-        Must be set if length is provided.
-    :keyword int length:
-        Number of bytes to read from the stream. This is optional, but
-        should be supplied for optimal performance.
-    :keyword bool validate_content:
-        If true, calculates an MD5 hash for each chunk of the blob. The storage
-        service checks the hash of the content that has arrived with the hash
-        that was sent. This is primarily valuable for detecting bitflips on
-        the wire if using http instead of https, as https (the default) will
-        already validate. Note that this MD5 hash is not stored with the
-        blob. Also note that if enabled, the memory-efficient upload algorithm
-        will not be used, because computing the MD5 hash requires buffering
-        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-    :rtype: None
-    """
-    overwrite = kwargs.pop('overwrite', False)
-    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
-        if hasattr(output, 'write'):
-            await _download_to_stream(client, output, **kwargs)
-        else:
-            if not overwrite and os.path.isfile(output):
-                raise ValueError("The file '{}' already exists.".format(output))
-            with open(output, 'wb') as file_handle:
-                await _download_to_stream(client, file_handle, **kwargs)
-
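And the matching download sketch, writing to a local path (again with a placeholder URL; `overwrite=True` permits replacing an existing local file, per the docstring above):

import asyncio

async def main():
    await download_blob_from_url(
        "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>",
        "hello-local.txt",
        overwrite=True,
    )

asyncio.run(main())
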
-
-__all__ = [
-    'upload_blob_to_url',
-    'download_blob_from_url',
-    'BlobServiceClient',
-    'ContainerClient',
-    'BlobClient',
-    'BlobLeaseClient',
-    'ExponentialRetry',
-    'LinearRetry',
-    'StorageStreamDownloader'
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_client_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1990 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-many-lines
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from .._shared.policies_async import ExponentialRetry
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._deserialize import get_page_ranges_result
-from .._serialize import get_modify_conditions, get_api_version
-from .._generated import VERSION
-from .._generated.aio import AzureBlobStorage
-from .._generated.models import StorageErrorException, CpkInfo
-from .._deserialize import deserialize_blob_properties
-from .._blob_client import BlobClient as BlobClientBase
-from ._upload_helpers import (
-    upload_block_blob,
-    upload_append_blob,
-    upload_page_blob)
-from .._models import BlobType, BlobBlock
-from .._lease import get_access_conditions
-from ._lease_async import BlobLeaseClient
-from ._download_async import StorageStreamDownloader
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.pipeline.policies import HTTPPolicy
-    from .._models import (  # pylint: disable=unused-import
-        ContainerProperties,
-        BlobProperties,
-        BlobSasPermissions,
-        ContentSettings,
-        PremiumPageBlobTier,
-        StandardBlobTier,
-        SequenceNumberAction
-    )
-
-
-class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase):  # pylint: disable=too-many-public-methods
-    """A client to interact with a specific blob, although that blob may not yet exist.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the blob,
-        use the :func:`from_blob_url` classmethod.
-    :param container_name: The container name for the blob.
-    :type container_name: str
-    :param blob_name: The name of the blob with which to interact. If specified, this value will override
-        a blob value specified in the blob URL.
-    :type blob_name: str
-    :param str snapshot:
-        The optional blob snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`create_snapshot`.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
-        any part exceeding that size will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_authentication_async.py
-            :start-after: [START create_blob_client]
-            :end-before: [END create_blob_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
-
-        .. literalinclude:: ../samples/blob_samples_authentication_async.py
-            :start-after: [START create_blob_client_sas_url]
-            :end-before: [END create_blob_client_sas_url]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobClient from a SAS URL to a blob.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            container_name,  # type: str
-            blob_name,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        super(BlobClient, self).__init__(
-            account_url,
-            container_name=container_name,
-            blob_name=blob_name,
-            snapshot=snapshot,
-            credential=credential,
-            **kwargs)
-        self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = kwargs.get('loop', None)
-
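A construction sketch matching the constructor signature above (account URL and credential are placeholders); since this is the async client, it is typically used as an async context manager:

client = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",  # placeholder
    container_name="mycontainer",
    blob_name="data/report.csv",
    credential="<sas-token-or-account-key>",                # placeholder
)
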
-    @distributed_trace_async
-    async def get_account_information(self, **kwargs): # type: ignore
-        # type: (Optional[int]) -> Dict[str, str]
-        """Gets information related to the storage account in which the blob resides.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict(str, str)
-        """
-        try:
-            return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def upload_blob(
-            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
-            length=None,  # type: Optional[int]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Any
-        """Creates a new blob from a data source with automatic chunking.
-
-        :param data: The blob data to upload.
-        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
-            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
-            If True, upload_blob will overwrite the existing data. If set to False, the
-            operation will fail with ResourceExistsError. The exception to the above is with Append
-            blob types: if set to False and the data already exists, an error will not be raised
-            and the data will be appended to the existing blob. If overwrite=True is set, the existing
-            append blob will be deleted and a new one created. Defaults to False.
-        :keyword ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default), will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            If specified, upload_blob only succeeds if the
-            blob's lease is active and matches this ID.
-            Required if the blob has an active lease.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            Encoding used to convert text data to bytes before uploading. Defaults to UTF-8.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: Blob-updated property dict (Etag and last modified)
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
-                :start-after: [START upload_a_blob]
-                :end-before: [END upload_a_blob]
-                :language: python
-                :dedent: 16
-                :caption: Upload a blob to the container.
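-
-            An inline sketch (hypothetical data; assumes an authenticated
-            ``BlobClient`` named ``blob`` inside a coroutine):
-
-            .. code-block:: python
-
-                data = b"Hello, World!"
-                # Upload as a block blob, replacing any existing content.
-                await blob.upload_blob(data, overwrite=True)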
-        """
-        options = self._upload_blob_options(
-            data,
-            blob_type=blob_type,
-            length=length,
-            metadata=metadata,
-            **kwargs)
-        if blob_type == BlobType.BlockBlob:
-            return await upload_block_blob(**options)
-        if blob_type == BlobType.PageBlob:
-            return await upload_page_blob(**options)
-        return await upload_append_blob(**options)
-
-    @distributed_trace_async
-    async def download_blob(self, offset=None, length=None, **kwargs):
-        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
-        """Downloads a blob to the StorageStreamDownloader. The readall() method must
-        be used to read all the content or readinto() must be used to download the blob into
-        a stream.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the blob.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default), will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, download_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword str encoding:
-            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: A streaming object (StorageStreamDownloader)
-        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
-                :start-after: [START download_a_blob]
-                :end-before: [END download_a_blob]
-                :language: python
-                :dedent: 16
-                :caption: Download a blob.
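-
-            A minimal sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                stream = await blob.download_blob()
-                # readall() buffers the whole blob; use readinto() to stream.
-                contents = await stream.readall()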
-        """
-        options = self._download_blob_options(
-            offset=offset,
-            length=length,
-            **kwargs)
-        downloader = StorageStreamDownloader(**options)
-        await downloader._setup()  # pylint: disable=protected-access
-        return downloader
-
-    @distributed_trace_async
-    async def delete_blob(self, delete_snapshots=None, **kwargs):
-        # type: (Optional[str], Any) -> None
-        """Marks the specified blob for deletion.
-
-        The blob is later deleted during garbage collection.
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the delete_blob()
-        operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
-        and retains the blob for a specified number of days.
-        After the specified number of days, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
-        `include=['deleted']` option, and can be restored using the :func:`undelete` operation.
-
-        :param str delete_snapshots:
-            Required if the blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, delete_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
-                :start-after: [START delete_blob]
-                :end-before: [END delete_blob]
-                :language: python
-                :dedent: 16
-                :caption: Delete a blob.
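-
-            An inline sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                # Delete the blob together with all of its snapshots.
-                await blob.delete_blob(delete_snapshots="include")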
-        """
-        options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
-        try:
-            await self._client.blob.delete(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def undelete_blob(self, **kwargs):
-        # type: (Any) -> None
-        """Restores soft-deleted blobs or snapshots.
-
-        Operation will only be successful if used within the specified number of days
-        set in the delete retention policy.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START undelete_blob]
-                :end-before: [END undelete_blob]
-                :language: python
-                :dedent: 12
-                :caption: Undeleting a blob.
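-
-            A minimal sketch (assumes soft delete is enabled on the account
-            and ``blob`` was deleted within the retention period):
-
-            .. code-block:: python
-
-                await blob.undelete_blob()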
-        """
-        try:
-            await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_blob_properties(self, **kwargs):
-        # type: (Any) -> BlobProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the blob. It does not return the content of the blob.
-
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: BlobProperties
-        :rtype: ~azure.storage.blob.BlobProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting the properties for a blob.
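-
-            An inline sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                props = await blob.get_blob_properties()
-                print(props.name, props.blob_type, props.size)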
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-        try:
-            blob_props = await self._client.blob.get_properties(
-                timeout=kwargs.pop('timeout', None),
-                snapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=deserialize_blob_properties,
-                cpk_info=cpk_info,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        blob_props.name = self.blob_name
-        blob_props.container = self.container_name
-        return blob_props # type: ignore
-
-    @distributed_trace_async
-    async def set_http_headers(self, content_settings=None, **kwargs):
-        # type: (Optional[ContentSettings], Any) -> None
-        """Sets system properties on the blob.
-
-        If one property is set for the content_settings, all properties will be overridden.
-
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified)
-        :rtype: Dict[str, Any]
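-
-        .. admonition:: Example:
-
-            A minimal sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                from azure.storage.blob import ContentSettings
-
-                # Note: this replaces all HTTP properties currently on the blob.
-                await blob.set_http_headers(
-                    content_settings=ContentSettings(content_type="application/json"))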
-        """
-        options = self._set_http_headers_options(content_settings=content_settings, **kwargs)
-        try:
-            return await self._client.blob.set_http_headers(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_blob_metadata(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
-        """Sets user-defined metadata for the blob as one or more name-value pairs.
-
-        :param metadata:
-            Dict containing name and value pairs. Each call to this operation
-            replaces all existing metadata attached to the blob. To remove all
-            metadata from the blob, call this operation with no metadata headers.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified)
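-
-        .. admonition:: Example:
-
-            An inline sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                # Each call replaces all metadata currently on the blob.
-                await blob.set_blob_metadata({"category": "reports", "owner": "alice"})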
-        """
-        options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
-        try:
-            return await self._client.blob.set_metadata(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_page_blob(  # type: ignore
-            self, size,  # type: int
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None, # type: Optional[Dict[str, str]]
-            premium_page_blob_tier=None,  # type: Optional[Union[str, PremiumPageBlobTier]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Creates a new Page Blob of the specified size.
-
-        :param int size:
-            This specifies the maximum size for the page blob, up to 1 TB.
-            The page blob size must be aligned to a 512-byte boundary.
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword int sequence_number:
-            Only for Page blobs. The sequence number is a user-controlled value that you can use to
-            track requests. The value of the sequence number must be between 0
-            and 2^63 - 1. The default value is 0.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict[str, Any]
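-
-        .. admonition:: Example:
-
-            A minimal sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                # The size must be aligned to a 512-byte boundary.
-                await blob.create_page_blob(size=1024 * 1024)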
-        """
-        options = self._create_page_blob_options(
-            size,
-            content_settings=content_settings,
-            metadata=metadata,
-            premium_page_blob_tier=premium_page_blob_tier,
-            **kwargs)
-        try:
-            return await self._client.page_blob.create(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
-        # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
-        """Creates a new Append Blob.
-
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict[str, Any]
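-
-        .. admonition:: Example:
-
-            An inline sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                # Create an empty append blob; data can then be appended with
-                # upload_blob(..., blob_type=BlobType.AppendBlob).
-                await blob.create_append_blob()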
-        """
-        options = self._create_append_blob_options(
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return await self._client.append_blob.create(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_snapshot(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
-        """Creates a snapshot of the blob.
-
-        A snapshot is a read-only version of a blob that's taken at a point in time.
-        It can be read, copied, or deleted, but not modified. Snapshots provide a way
-        to back up a blob as it appears at a moment in time.
-
-        A snapshot of a blob has the same name as the base blob from which the snapshot
-        is taken, with a DateTime value appended to indicate the time at which the
-        snapshot was taken.
-
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START create_blob_snapshot]
-                :end-before: [END create_blob_snapshot]
-                :language: python
-                :dedent: 12
-                :caption: Create a snapshot of the blob.
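-
-            A minimal sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                snapshot_props = await blob.create_snapshot()
-                # snapshot_props carries the new snapshot ID, ETag, and
-                # last-modified values described above.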
-        """
-        options = self._create_snapshot_options(metadata=metadata, **kwargs)
-        try:
-            return await self._client.blob.create_snapshot(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
-        # type: (str, Optional[Dict[str, str]], bool, Any) -> Any
-        """Copies a blob asynchronously.
-
-        This operation returns a copy operation
-        object that can be used to wait on the completion of the operation,
-        as well as check status or abort the copy operation.
-        The Blob service copies blobs on a best-effort basis.
-
-        The source blob for a copy operation may be a block blob, an append blob,
-        or a page blob. If the destination blob already exists, it must be of the
-        same blob type as the source blob. Any existing destination blob will be
-        overwritten. The destination blob cannot be modified while a copy operation
-        is in progress.
-
-        When copying from a page blob, the Blob service creates a destination page
-        blob of the source blob's length, initially containing all zeroes. Then
-        the source page ranges are enumerated, and non-empty ranges are copied.
-
-        For a block blob or an append blob, the Blob service creates a committed
-        blob of zero length before returning from this operation. When copying
-        from a block blob, all committed blocks and their block IDs are copied.
-        Uncommitted blocks are not copied. At the end of the copy operation, the
-        destination blob will have the same committed block count as the source.
-
-        When copying from an append blob, all committed blocks are copied. At the
-        end of the copy operation, the destination blob will have the same committed
-        block count as the source.
-
-        For all blob types, you can call status() on the returned polling object
-        to check the status of the copy operation, or wait() to block until the
-        operation is complete. The final blob will be committed when the copy completes.
-
-        :param str source_url:
-            A URL of up to 2 KB in length that specifies a file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.blob.core.windows.net/mycontainer/myblob
-
-            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
-
-            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
-        :param metadata:
-            Name-value pairs associated with the blob as metadata. If no name-value
-            pairs are specified, the operation will copy the metadata from the
-            source blob or file to the destination blob. If one or more name-value
-            pairs are specified, the destination blob is created with the specified
-            metadata, and metadata is not copied from the source blob or file.
-        :type metadata: dict(str, str)
-        :param bool incremental_copy:
-            Copies the snapshot of the source page blob to a destination page blob.
-            The snapshot is copied such that only the differential changes since
-            the previously copied snapshot are transferred to the destination.
-            The copied snapshots are complete copies of the original snapshot and
-            can be read or copied from as usual. Defaults to False.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source
-            blob has been modified since the specified date/time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only if the source blob
-            has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has been modified since the specified date/time.
-            If the destination blob has not been modified, the Blob service returns
-            status code 412 (Precondition Failed).
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the blob only
-            if the destination blob has not been modified since the specified
-            date/time. If the destination blob has been modified, the Blob service
-            returns status code 412 (Precondition Failed).
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword destination_lease:
-            The lease ID specified for this header must match the lease ID of the
-            destination blob. If the request does not include the lease ID or it is not
-            valid, the operation fails with status code 412 (Precondition Failed).
-        :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword source_lease:
-            Specify this to perform the Copy Blob operation only if
-            the lease ID given matches the active lease ID of the source blob.
-        :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
-            Indicates the priority with which to rehydrate an archived blob.
-        :keyword bool requires_sync:
-            Enforces that the service will not return a response until the copy is complete.
-        :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START copy_blob_from_url]
-                :end-before: [END copy_blob_from_url]
-                :language: python
-                :dedent: 16
-                :caption: Copy a blob from a URL.
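-
-            An inline sketch (hypothetical source URL; assumes ``blob`` is an
-            authenticated ``BlobClient`` and the code runs inside a
-            coroutine):
-
-            .. code-block:: python
-
-                source = "https://myaccount.blob.core.windows.net/mycontainer/source_blob"
-                copy = await blob.start_copy_from_url(source)
-                # copy_id can later be passed to abort_copy().
-                print(copy["copy_id"], copy["copy_status"])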
-        """
-        options = self._start_copy_from_url_options(
-            source_url,
-            metadata=metadata,
-            incremental_copy=incremental_copy,
-            **kwargs)
-        try:
-            if incremental_copy:
-                return await self._client.page_blob.copy_incremental(**options)
-            return await self._client.blob.start_copy_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def abort_copy(self, copy_id, **kwargs):
-        # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None
-        """Abort an ongoing copy operation.
-
-        This will leave a destination blob with zero length and full metadata.
-        This will raise an error if the copy operation has already ended.
-
-        :param copy_id:
-            The copy operation to abort. This can be either an ID, or an
-            instance of BlobProperties.
-        :type copy_id: str or ~azure.storage.blob.BlobProperties
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START abort_copy_blob_from_url]
-                :end-before: [END abort_copy_blob_from_url]
-                :language: python
-                :dedent: 16
-                :caption: Abort copying a blob from URL.
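-
-            A minimal sketch (``copy`` is the properties dict returned by
-            ``start_copy_from_url``):
-
-            .. code-block:: python
-
-                await blob.abort_copy(copy["copy_id"])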
-        """
-        options = self._abort_copy_options(copy_id, **kwargs)
-        try:
-            await self._client.blob.abort_copy_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
-        # type: (int, Optional[str], Any) -> BlobLeaseClient
-        """Requests a new lease.
-
-        If the blob does not have an active lease, the Blob
-        Service creates a lease on the blob and returns a new lease.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The Blob Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A BlobLeaseClient object.
-        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START acquire_lease_on_blob]
-                :end-before: [END acquire_lease_on_blob]
-                :language: python
-                :dedent: 12
-                :caption: Acquiring a lease on a blob.
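-
-            An inline sketch, assuming an authenticated ``BlobClient`` named
-            ``blob`` inside a coroutine:
-
-            .. code-block:: python
-
-                # Acquire a 15-second lease, then release it.
-                lease = await blob.acquire_lease(lease_duration=15)
-                await lease.release()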
-        """
-        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
-        await lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
-
-    @distributed_trace_async
-    async def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
-        # type: (Union[str, StandardBlobTier], Any) -> None
-        """This operation sets the tier on a block blob.
-
-        A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param standard_blob_tier:
-            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
-            'Archive'. The hot tier is optimized for storing data that is accessed
-            frequently. The cool storage tier is optimized for storing data that
-            is infrequently accessed and stored for at least a month. The archive
-            tier is optimized for storing data that is rarely accessed and stored
-            for at least six months with flexible latency requirements.
-        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
-        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
-            Indicates the priority with which to rehydrate an archived blob.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :rtype: None
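-
-        .. admonition:: Example:
-
-            A minimal sketch, assuming ``blob`` is an authenticated block blob
-            ``BlobClient`` inside a coroutine:
-
-            .. code-block:: python
-
-                from azure.storage.blob import StandardBlobTier
-
-                await blob.set_standard_blob_tier(StandardBlobTier.Cool)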
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if standard_blob_tier is None:
-            raise ValueError("A StandardBlobTier must be specified")
-        try:
-            await self._client.blob.set_tier(
-                tier=standard_blob_tier,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def stage_block(
-            self, block_id,  # type: str
-            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Creates a new block to be committed as part of a blob.
-
-        :param str block_id: A valid Base64 string value that identifies the
-             block. Prior to encoding, the string must be less than or equal to 64
-             bytes in size. For a given blob, the length of the value specified for
-             the block_id parameter must be the same size for each block.
-        :param data: The blob data.
-        :param int length: Size of the block.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default), will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword str encoding:
-            Encoding used to convert text data to bytes before uploading. Defaults to UTF-8.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
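-
-        .. admonition:: Example:
-
-            An inline sketch (hypothetical block ID; assumes ``blob`` is an
-            authenticated ``BlobClient`` and the code runs inside a
-            coroutine):
-
-            .. code-block:: python
-
-                import base64
-
-                block_id = base64.b64encode(b"block-000").decode()
-                await blob.stage_block(block_id, b"first chunk of data")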
-        """
-        options = self._stage_block_options(
-            block_id,
-            data,
-            length=length,
-            **kwargs)
-        try:
-            return await self._client.block_blob.stage_block(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def stage_block_from_url(
-            self, block_id,  # type: str
-            source_url,  # type: str
-            source_offset=None,  # type: Optional[int]
-            source_length=None,  # type: Optional[int]
-            source_content_md5=None,  # type: Optional[Union[bytes, bytearray]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Creates a new block to be committed as part of a blob where
-        the contents are read from a URL.
-
-        :param str block_id: A valid Base64 string value that identifies the
-             block. Prior to encoding, the string must be less than or equal to 64
-             bytes in size. For a given blob, the length of the value specified for
-             the block_id parameter must be the same size for each block.
-        :param str source_url: The URL of the source blob or file from which the block contents are read.
-        :param int source_offset:
-            Start of byte range to use for the block.
-            Must be set if source length is provided.
-        :param int source_length: The size of the block in bytes.
-        :param bytearray source_content_md5:
-            Specify the md5 calculated for the range of
-            bytes that must be read from the copy source.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        options = self._stage_block_from_url_options(
-            block_id,
-            source_url,
-            source_offset=source_offset,
-            source_length=source_length,
-            source_content_md5=source_content_md5,
-            **kwargs)
-        try:
-            return await self._client.block_blob.stage_block_from_url(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
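A hedged sketch of staging a block from a remote source, again using the upstream `azure.storage.blob.aio` names this module mirrors; `source_sas_url` and the sizes are placeholders:

```python
import base64

from azure.storage.blob.aio import BlobClient


async def stage_block_from_source(blob_client: BlobClient, source_sas_url: str) -> None:
    # Copy the first 4 MiB of the source blob into a staged block.
    # source_offset must be set because source_length is provided.
    block_id = base64.b64encode(b"remote-000001").decode()
    await blob_client.stage_block_from_url(
        block_id,
        source_sas_url,
        source_offset=0,
        source_length=4 * 1024 * 1024,
    )
```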
-
-    @distributed_trace_async
-    async def get_block_list(self, block_list_type="committed", **kwargs):
-        # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
-        """The Get Block List operation retrieves the list of blocks that have
-        been uploaded as part of a block blob.
-
-        :param str block_list_type:
-            Specifies whether to return the list of committed
-            blocks, the list of uncommitted blocks, or both lists together.
-            Possible values include: 'committed', 'uncommitted', 'all'
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A tuple of two lists - committed and uncommitted blocks
-        :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        try:
-            blocks = await self._client.block_blob.get_block_list(
-                list_type=block_list_type,
-                snapshot=self.snapshot,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return self._get_block_list_result(blocks)
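A small illustrative sketch of inspecting both block lists (helper name is made up; the `id`/`size` attributes are those of `BlobBlock`):

```python
from azure.storage.blob.aio import BlobClient


async def show_blocks(blob_client: BlobClient) -> None:
    committed, uncommitted = await blob_client.get_block_list("all")
    print("committed:", [(b.id, b.size) for b in committed])
    print("uncommitted:", [(b.id, b.size) for b in uncommitted])
```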
-
-    @distributed_trace_async
-    async def commit_block_list( # type: ignore
-            self, block_list,  # type: List[BlobBlock]
-            content_settings=None,  # type: Optional[ContentSettings]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """The Commit Block List operation writes a blob by specifying the list of
-        block IDs that make up the blob.
-
-        :param list block_list:
-            List of BlobBlock objects identifying the blocks to commit.
-        :param ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict[str, str]
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block list content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https, as https (the default),
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._commit_block_list_options(
-            block_list,
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return await self._client.block_blob.commit_block_list(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
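Putting the two halves together, here is a minimal stage-then-commit sketch (illustrative helper, written against the upstream `azure.storage.blob` packages these modules mirror):

```python
import base64
from typing import List

from azure.storage.blob import BlobBlock
from azure.storage.blob.aio import BlobClient


async def upload_in_blocks(blob_client: BlobClient, chunks: List[bytes]) -> None:
    block_list = []
    for index, chunk in enumerate(chunks):
        block_id = base64.b64encode(f"block-{index:06d}".encode()).decode()
        await blob_client.stage_block(block_id, chunk)
        block_list.append(BlobBlock(block_id=block_id))
    # Nothing is readable until the staged blocks are committed.
    await blob_client.commit_block_list(block_list)
```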
-
-    @distributed_trace_async
-    async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
-        # type: (Union[str, PremiumPageBlobTier], **Any) -> None
-        """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
-
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :rtype: None
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if premium_page_blob_tier is None:
-            raise ValueError("A PremiumPageBlobTiermust be specified")
-        try:
-            await self._client.blob.set_tier(
-                tier=premium_page_blob_tier,
-                timeout=kwargs.pop('timeout', None),
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
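A one-line usage sketch, assuming a page blob on a premium account (the tier value is just an example):

```python
from azure.storage.blob import PremiumPageBlobTier
from azure.storage.blob.aio import BlobClient


async def move_to_p10(blob_client: BlobClient) -> None:
    # Only valid for page blobs on premium storage accounts.
    await blob_client.set_premium_page_blob_tier(PremiumPageBlobTier.P10)
```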
-
-    @distributed_trace_async
-    async def get_page_ranges( # type: ignore
-            self, offset=None, # type: Optional[int]
-            length=None, # type: Optional[int]
-            previous_snapshot_diff=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            **kwargs
-        ):
-        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-        """Returns the list of valid page ranges for a Page Blob or snapshot
-        of a page blob.
-
-        :param int offset:
-            Start of byte range to use for getting valid page ranges.
-            If no length is given, all bytes after the offset will be searched.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int length:
-            Number of bytes to use for getting valid page ranges.
-            If length is given, offset must be provided.
-            This range will return valid page ranges from the offset start up to
-            the specified length.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param previous_snapshot_diff:
-            The snapshot diff parameter that contains an opaque DateTime value that
-            specifies a previous blob snapshot to be compared
-            against a more recent snapshot or the current blob.
-        :type previous_snapshot_diff: str or dict(str, Any)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first element is the filled page ranges, the second element is the cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            previous_snapshot_diff=previous_snapshot_diff,
-            **kwargs)
-        try:
-            if previous_snapshot_diff:
-                ranges = await self._client.page_blob.get_page_ranges_diff(**options)
-            else:
-                ranges = await self._client.page_blob.get_page_ranges(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
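A short sketch of consuming the returned tuple; the dictionaries carry integer 'start' and 'end' byte offsets (helper name is illustrative):

```python
from azure.storage.blob.aio import BlobClient


async def print_page_ranges(blob_client: BlobClient) -> None:
    filled, cleared = await blob_client.get_page_ranges()
    for page_range in filled:
        print("filled:", page_range["start"], "-", page_range["end"])
    for page_range in cleared:
        print("cleared:", page_range["start"], "-", page_range["end"])
```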
-
-    @distributed_trace_async
-    async def get_page_range_diff_for_managed_disk(
-            self, previous_snapshot_url,  # type: str
-            offset=None, # type: Optional[int]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-        """Returns the list of valid page ranges for a managed disk or snapshot.
-
-        .. note::
-            This operation is only available for managed disk accounts.
-
-        .. versionadded:: 12.2.0
-            This operation was introduced in API version '2019-07-07'.
-
-        :param str previous_snapshot_url:
-            Specifies the URL of a previous snapshot of the managed disk.
-            The response will only contain pages that were changed between the target blob and
-            its previous snapshot.
-        :param int offset:
-            Start of byte range to use for getting valid page ranges.
-            If no length is given, all bytes after the offset will be searched.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int length:
-            Number of bytes to use for getting valid page ranges.
-            If length is given, offset must be provided.
-            This range will return valid page ranges from the offset start up to
-            the specified length.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first element is the filled page ranges, the second element is the cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            prev_snapshot_url=previous_snapshot_url,
-            **kwargs)
-        try:
-            ranges = await self._client.page_blob.get_page_ranges_diff(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
-
-    @distributed_trace_async
-    async def set_sequence_number( # type: ignore
-            self, sequence_number_action,  # type: Union[str, SequenceNumberAction]
-            sequence_number=None,  # type: Optional[str]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets the blob sequence number.
-
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
-        :param str sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._set_sequence_number_options(
-            sequence_number_action, sequence_number=sequence_number, **kwargs)
-        try:
-            return await self._client.page_blob.update_sequence_number(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
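A brief usage sketch of the two common actions (values are illustrative; 'increment' takes no explicit sequence number):

```python
from azure.storage.blob import SequenceNumberAction
from azure.storage.blob.aio import BlobClient


async def bump_sequence_number(blob_client: BlobClient) -> None:
    # 'update' sets an explicit value; 'increment' bumps the current one.
    await blob_client.set_sequence_number(SequenceNumberAction.Update, sequence_number="7")
    await blob_client.set_sequence_number(SequenceNumberAction.Increment)
```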
-
-    @distributed_trace_async
-    async def resize_blob(self, size, **kwargs):
-        # type: (int, Any) -> Dict[str, Union[str, datetime]]
-        """Resizes a page blob to the specified size.
-
-        If the specified value is less than the current size of the blob,
-        then all pages above the specified value are cleared.
-
-        :param int size:
-            Size used to resize the blob. The maximum size for a page blob is 1 TB.
-            The page blob size must be aligned to a 512-byte boundary.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._resize_blob_options(size, **kwargs)
-        try:
-            return await self._client.page_blob.resize(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
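A minimal sketch of resizing, with the 512-byte alignment the docstring requires spelled out (helper and size are illustrative):

```python
from azure.storage.blob.aio import BlobClient


async def grow_page_blob(blob_client: BlobClient) -> None:
    # Page blob sizes must be 512-byte aligned: 1 MiB == 2048 pages of 512 bytes.
    new_size = 1024 * 1024
    assert new_size % 512 == 0
    await blob_client.resize_blob(new_size)
```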
-
-    @distributed_trace_async
-    async def upload_page( # type: ignore
-            self, page,  # type: bytes
-            offset,  # type: int
-            length,  # type: int
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """The Upload Pages operation writes a range of pages to a page blob.
-
-        :param bytes page:
-            Content of the page.
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https, as https (the default),
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            The encoding used to convert text data into bytes. Defaults to UTF-8.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._upload_page_options(
-            page=page,
-            offset=offset,
-            length=length,
-            **kwargs)
-        try:
-            return await self._client.page_blob.upload_pages(**options) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
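A minimal sketch of writing one 512-byte page at the start of the blob (helper name and payload are illustrative):

```python
from azure.storage.blob.aio import BlobClient


async def write_first_page(blob_client: BlobClient) -> None:
    # Both offset and length must be multiples of 512.
    page = b"\x01" * 512
    await blob_client.upload_page(page, offset=0, length=len(page))
```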
-
-    @distributed_trace_async
-    async def upload_pages_from_url(self, source_url,  # type: str
-                                    offset,  # type: int
-                                    length,  # type: int
-                                    source_offset,  # type: int
-                                    **kwargs
-                                    ):
-        # type: (...) -> Dict[str, Any]
-        """
-        The Upload Pages operation writes a range of pages to a page blob where
-        the contents are read from a URL.
-
-        :param str source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is
-            either public or authorized with a shared access signature.
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int source_offset:
-            The start of the range of bytes (inclusive) to be taken from the copy source.
-            The service will read the same number of bytes as the destination range (length - offset).
-        :keyword bytes source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-
-        options = self._upload_pages_from_url_options(
-            source_url=source_url,
-            offset=offset,
-            length=length,
-            source_offset=source_offset,
-            **kwargs
-        )
-        try:
-            return await self._client.page_blob.upload_pages_from_url(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
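A hedged sketch of copying a single page from a source blob URL; `source_sas_url` is a placeholder for a public or SAS-authorized URL:

```python
from azure.storage.blob.aio import BlobClient


async def copy_first_page(blob_client: BlobClient, source_sas_url: str) -> None:
    # Reads 512 bytes starting at source_offset and writes them to the
    # destination range [offset, offset + length).
    await blob_client.upload_pages_from_url(
        source_sas_url, offset=0, length=512, source_offset=0)
```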
-
-    @distributed_trace_async
-    async def clear_page(self, offset, length, **kwargs):
-        # type: (int, int, Any) -> Dict[str, Union[str, datetime]]
-        """Clears a range of pages.
-
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned to 512-byte boundaries: the start offset
-            must be a multiple of 512 and the length must be a multiple
-            of 512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int if_sequence_number_lte:
-            If the blob's sequence number is less than or equal to
-            the specified value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_lt:
-            If the blob's sequence number is less than the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword int if_sequence_number_eq:
-            If the blob's sequence number is equal to the specified
-            value, the request proceeds; otherwise it fails.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        options = self._clear_page_options(offset, length, **kwargs)
-        try:
-            return await self._client.page_blob.clear_pages(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
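A one-call sketch of clearing the first page (helper name is illustrative):

```python
from azure.storage.blob.aio import BlobClient


async def clear_first_page(blob_client: BlobClient) -> None:
    # Clearing zeroes the range without shrinking the blob.
    await blob_client.clear_page(offset=0, length=512)
```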
-
-    @distributed_trace_async
-    async def append_block( # type: ignore
-            self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """Commits a new block of data to the end of the existing append blob.
-
-        :param data:
-            Content of the block.
-        :param int length:
-            Size of the block in bytes.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https, as https (the default),
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword str encoding:
-            The encoding used to convert text data into bytes. Defaults to UTF-8.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
-        :rtype: dict(str, Any)
-        """
-        options = self._append_block_options(
-            data,
-            length=length,
-            **kwargs
-        )
-        try:
-            return await self._client.append_blob.append_block(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
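A minimal append-blob sketch; the result keys shown match the "append offset, committed block count" noted in the docstring but are written from memory of the upstream SDK, so treat them as an assumption:

```python
from azure.storage.blob.aio import BlobClient


async def append_log_line(blob_client: BlobClient) -> None:
    await blob_client.create_append_blob()
    result = await blob_client.append_block(b"first log line\n")
    # Assumed result keys, per the upstream azure-storage-blob response headers.
    print(result["blob_append_offset"], result["blob_committed_block_count"])
```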
-
-    @distributed_trace_async
-    async def append_block_from_url(self, copy_source_url,  # type: str
-                                    source_offset=None,  # type: Optional[int]
-                                    source_length=None,  # type: Optional[int]
-                                    **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """
-        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
-
-        :param str copy_source_url:
-            The URL of the source data. It can point to any Azure Blob or File that is
-            either public or authorized with a shared access signature.
-        :param int source_offset:
-            The start of the range of bytes (inclusive) to be taken from the copy source.
-        :param int source_length:
-            The end of the range of bytes that has to be taken from the copy source.
-        :keyword bytearray source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int appendpos_condition:
-            Optional conditional header, used only for the Append Block operation.
-            A number indicating the byte offset to compare. Append Block will
-            succeed only if the append position is equal to this number. If it
-            is not, the request will fail with the
-            AppendPositionConditionNotMet error
-            (HTTP status code 412 - Precondition Failed).
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The destination match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the source resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the source resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
-        :rtype: dict(str, Any)
-        """
-        options = self._append_block_from_url_options(
-            copy_source_url,
-            source_offset=source_offset,
-            source_length=source_length,
-            **kwargs
-        )
-        try:
-            return await self._client.append_blob.append_block_from_url(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_service_client_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_service_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_service_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_blob_service_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,571 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List,
-    TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import AsyncPipeline
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.async_paging import AsyncItemPaged
-
-from .._shared.models import LocationMode
-from .._shared.policies_async import ExponentialRetry
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._shared.parser import _to_utc_datetime
-from .._shared.response_handlers import parse_to_internal_user_delegation_key
-from .._generated import VERSION
-from .._generated.aio import AzureBlobStorage
-from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo
-from .._blob_service_client import BlobServiceClient as BlobServiceClientBase
-from ._container_client_async import ContainerClient
-from ._blob_client_async import BlobClient
-from .._models import ContainerProperties
-from .._deserialize import service_stats_deserialize, service_properties_deserialize
-from .._serialize import get_api_version
-from ._models import ContainerPropertiesPaged
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.pipeline.transport import HttpTransport
-    from azure.core.pipeline.policies import HTTPPolicy
-    from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey
-    from ._lease_async import BlobLeaseClient
-    from .._models import (
-        BlobProperties,
-        PublicAccess,
-        BlobAnalyticsLogging,
-        Metrics,
-        CorsRule,
-        RetentionPolicy,
-        StaticWebsite,
-    )
-
-
-class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
-    """A client to interact with the Blob Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete containers within the account.
-    For operations relating to a specific container or blob, clients for those entities
-    can also be retrieved using the `get_client` functions.
-
-    :param str account_url:
-        The URL to the blob storage account. Any other entities included
-        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
-        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_authentication_async.py
-            :start-after: [START create_blob_service_client]
-            :end-before: [END create_blob_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/blob_samples_authentication_async.py
-            :start-after: [START create_blob_service_client_oauth]
-            :end-before: [END create_blob_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the BlobServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        super(BlobServiceClient, self).__init__(
-            account_url,
-            credential=credential,
-            **kwargs)
-        self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = kwargs.get('loop', None)
-
-    @distributed_trace_async
-    async def get_user_delegation_key(self, key_start_time,  # type: datetime
-                                      key_expiry_time,  # type: datetime
-                                      **kwargs  # type: Any
-                                      ):
-        # type: (...) -> UserDelegationKey
-        """
-        Obtain a user delegation key for the purpose of signing SAS tokens.
-        A token credential must be present on the service object for this request to succeed.
-
-        :param ~datetime.datetime key_start_time:
-            A DateTime value. Indicates when the key becomes valid.
-        :param ~datetime.datetime key_expiry_time:
-            A DateTime value. Indicates when the key stops being valid.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The user delegation key.
-        :rtype: ~azure.storage.blob.UserDelegationKey
-        """
-        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
-                                                                                     timeout=timeout,
-                                                                                     **kwargs)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
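
# Illustrative sketch (not part of the original diff): obtaining a user
# delegation key with this async client. The account URL is hypothetical, and
# a token credential from azure.identity is required, as the docstring notes.
import asyncio
from datetime import datetime, timedelta
from azure.identity.aio import DefaultAzureCredential
from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobServiceClient

async def fetch_delegation_key():
    credential = DefaultAzureCredential()
    client = BlobServiceClient("https://myaccount.blob.core.windows.net", credential=credential)
    start = datetime.utcnow()
    # The key can only sign SAS tokens within its [start, expiry] window.
    return await client.get_user_delegation_key(start, start + timedelta(hours=1))

asyncio.run(fetch_delegation_key())
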
-
-    @distributed_trace_async
-    async def get_account_information(self, **kwargs):
-        # type: (Any) -> Dict[str, str]
-        """Gets information related to the storage account.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict(str, str)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START get_blob_service_account_info]
-                :end-before: [END get_blob_service_account_info]
-                :language: python
-                :dedent: 12
-                :caption: Getting account information for the blob service.
-        """
-        try:
-            return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_service_stats(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Retrieves statistics related to replication for the Blob service.
-
-        It is only available when read-access geo-redundant replication is enabled for
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durably
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create an account via the Azure Management portal or the Azure classic
-        portal, for example, North Central US. The location to which your data
-        is replicated is the secondary location. The secondary location is
-        automatically determined based on the location of the primary; it is in
-        a second data center that resides in the same region as the primary
-        location. Read-only access is available from the secondary location if
-        read-access geo-redundant replication is enabled for your storage account.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The blob service stats.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START get_blob_service_stats]
-                :end-before: [END get_blob_service_stats]
-                :language: python
-                :dedent: 12
-                :caption: Getting service stats for the blob service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = await self._client.service.get_statistics( # type: ignore
-                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
-            return service_stats_deserialize(stats)
-        except StorageErrorException as error:
-            process_storage_error(error)
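
# Illustrative sketch (assumed shape): service_stats_deserialize above yields a
# dict with a 'geo_replication' entry carrying 'status' and 'last_sync_time'.
async def check_replication(client):
    stats = await client.get_service_stats()
    geo = stats.get("geo_replication", {})
    print(geo.get("status"), geo.get("last_sync_time"))
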
-
-    @distributed_trace_async
-    async def get_service_properties(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An object containing blob service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START get_blob_service_properties]
-                :end-before: [END get_blob_service_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting service properties for the blob service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_service_properties(
-            self, analytics_logging=None,  # type: Optional[BlobAnalyticsLogging]
-            hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            target_version=None,  # type: Optional[str]
-            delete_retention_policy=None,  # type: Optional[RetentionPolicy]
-            static_website=None,  # type: Optional[StaticWebsite]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's Blob service, including
-        Azure Storage Analytics.
-
-        If an element (e.g. analytics_logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param analytics_logging:
-            Groups the Azure Analytics Logging settings.
-        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for blobs.
-        :type hour_metrics: ~azure.storage.blob.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for blobs.
-        :type minute_metrics: ~azure.storage.blob.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list[~azure.storage.blob.CorsRule]
-        :param str target_version:
-            Indicates the default version to use for requests if an incoming
-            request's version is not specified.
-        :param delete_retention_policy:
-            The delete retention policy specifies whether to retain deleted blobs.
-            It also specifies the number of days and versions of the blob to keep.
-        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
-        :param static_website:
-            Specifies whether the static website feature is enabled,
-            and if so, indicates the index document and 404 error document to use.
-        :type static_website: ~azure.storage.blob.StaticWebsite
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START set_blob_service_properties]
-                :end-before: [END set_blob_service_properties]
-                :language: python
-                :dedent: 12
-                :caption: Setting service properties for the blob service.
-        """
-        props = StorageServiceProperties(
-            logging=analytics_logging,
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors,
-            default_service_version=target_version,
-            delete_retention_policy=delete_retention_policy,
-            static_website=static_website
-        )
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
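
# Illustrative sketch: enabling analytics logging while leaving all other
# service properties untouched (None arguments are preserved server-side, per
# the docstring). The model imports assume this package re-exports the
# azure.storage.blob models.
from azure.multiapi.storagev2.blob.v2019_07_07 import BlobAnalyticsLogging, RetentionPolicy

async def enable_logging(client):
    retention = RetentionPolicy(enabled=True, days=7)
    logging = BlobAnalyticsLogging(read=True, write=True, delete=True,
                                   retention_policy=retention)
    await client.set_service_properties(analytics_logging=logging)
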
-
-    @distributed_trace
-    def list_containers(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> AsyncItemPaged[ContainerProperties]
-        """Returns a generator to list the containers under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all containers have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only containers whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that container metadata be returned in the response.
-            The default value is `False`.
-        :keyword int results_per_page:
-            The maximum number of container names to retrieve per API
-            call. If the request does not specify a value, the server will return up to 5,000 items.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) of ContainerProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START bsc_list_containers]
-                :end-before: [END bsc_list_containers]
-                :language: python
-                :dedent: 16
-                :caption: Listing the containers in the blob service.
-        """
-        include = 'metadata' if include_metadata else None
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.service.list_containers_segment,
-            prefix=name_starts_with,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command,
-            prefix=name_starts_with,
-            results_per_page=results_per_page,
-            page_iterator_class=ContainerPropertiesPaged
-        )
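
# Illustrative sketch: the AsyncItemPaged result is consumed with `async for`,
# which transparently follows continuation tokens. The prefix is hypothetical.
async def print_containers(client):
    async for container in client.list_containers(name_starts_with="logs-",
                                                  include_metadata=True):
        print(container.name, container.metadata)
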
-
-    @distributed_trace_async
-    async def create_container(
-            self, name,  # type: str
-            metadata=None,  # type: Optional[Dict[str, str]]
-            public_access=None,  # type: Optional[Union[PublicAccess, str]]
-            **kwargs
-        ):
-        # type: (...) -> ContainerClient
-        """Creates a new container under the specified account.
-
-        If a container with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a client with which to interact with the newly
-        created container.
-
-        :param str name: The name of the container to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: 'container', 'blob'.
-        :type public_access: str or ~azure.storage.blob.PublicAccess
-        :keyword container_encryption_scope:
-            Specifies the default encryption scope to set on the container and use for
-            all future writes.
-
-            .. versionadded:: 12.2.0
-
-        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.blob.aio.ContainerClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START bsc_create_container]
-                :end-before: [END bsc_create_container]
-                :language: python
-                :dedent: 16
-                :caption: Creating a container in the blob service.
-        """
-        container = self.get_container_client(name)
-        timeout = kwargs.pop('timeout', None)
-        kwargs.setdefault('merge_span', True)
-        await container.create_container(
-            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
-        return container
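
# Illustrative sketch: create-or-get semantics built on the ResourceExistsError
# behaviour described above.
from azure.core.exceptions import ResourceExistsError

async def ensure_container(client, name):
    try:
        return await client.create_container(name)
    except ResourceExistsError:
        # The container already exists; return a client for it instead.
        return client.get_container_client(name)
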
-
-    @distributed_trace_async
-    async def delete_container(
-            self, container,  # type: Union[ContainerProperties, str]
-            lease=None,  # type: Optional[Union[BlobLeaseClient, str]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified container for deletion.
-
-        The container and any blobs contained within it are later deleted during garbage collection.
-        If the container is not found, a ResourceNotFoundError will be raised.
-
-        :param container:
-            The container to delete. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :param lease:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START bsc_delete_container]
-                :end-before: [END bsc_delete_container]
-                :language: python
-                :dedent: 16
-                :caption: Deleting a container in the blob service.
-        """
-        container = self.get_container_client(container) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        await container.delete_container( # type: ignore
-            lease=lease,
-            timeout=timeout,
-            **kwargs)
-
-    def get_container_client(self, container):
-        # type: (Union[ContainerProperties, str]) -> ContainerClient
-        """Get a client to interact with the specified container.
-
-        The container need not already exist.
-
-        :param container:
-            The container. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :returns: A ContainerClient.
-        :rtype: ~azure.storage.blob.aio.ContainerClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START bsc_get_container_client]
-                :end-before: [END bsc_get_container_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the container client to interact with a specific container.
-        """
-        try:
-            container_name = container.name
-        except AttributeError:
-            container_name = container
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ContainerClient(
-            self.url, container_name=container_name,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function, loop=self._loop)
-
-    def get_blob_client(
-            self, container,  # type: Union[ContainerProperties, str]
-            blob,  # type: Union[BlobProperties, str]
-            snapshot=None  # type: Optional[Union[Dict[str, Any], str]]
-        ):
-        # type: (...) -> BlobClient
-        """Get a client to interact with the specified blob.
-
-        The blob need not already exist.
-
-        :param container:
-            The container that the blob is in. This can either be the name of the container,
-            or an instance of ContainerProperties.
-        :type container: str or ~azure.storage.blob.ContainerProperties
-        :param blob:
-            The blob with which to interact. This can either be the name of the blob,
-            or an instance of BlobProperties.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param snapshot:
-            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
-            or a dictionary output returned by
-            :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`.
-        :type snapshot: str or dict(str, Any)
-        :returns: A BlobClient.
-        :rtype: ~azure.storage.blob.aio.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_service_async.py
-                :start-after: [START bsc_get_blob_client]
-                :end-before: [END bsc_get_blob_client]
-                :language: python
-                :dedent: 16
-                :caption: Getting the blob client to interact with a specific blob.
-        """
-        try:
-            container_name = container.name
-        except AttributeError:
-            container_name = container
-
-        try:
-            blob_name = blob.name
-        except AttributeError:
-            blob_name = blob
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return BlobClient( # type: ignore
-            self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function, loop=self._loop)
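
# Illustrative sketch: clients returned by get_blob_client share the parent's
# transport via AsyncTransportWrapper, so no new connection pool is created.
# Container and blob names below are hypothetical.
async def blob_etag(service_client):
    blob = service_client.get_blob_client("my-container", "my-blob.txt")
    props = await blob.get_blob_properties()
    return props.etag
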
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_container_client_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_container_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_container_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_container_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1108 +0,0 @@
-# pylint: disable=too-many-lines
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, AsyncIterator,
-    TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.async_paging import AsyncItemPaged
-from azure.core.pipeline import AsyncPipeline
-from azure.core.pipeline.transport import HttpRequest, AsyncHttpResponse
-
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.policies_async import ExponentialRetry
-from .._shared.request_handlers import add_metadata_headers, serialize_iso
-from .._shared.response_handlers import (
-    process_storage_error,
-    return_response_headers,
-    return_headers_and_deserialized)
-from .._generated import VERSION
-from .._generated.aio import AzureBlobStorage
-from .._generated.models import (
-    StorageErrorException,
-    SignedIdentifier)
-from .._deserialize import deserialize_container_properties
-from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version
-from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name
-from .._lease import get_access_conditions
-from .._models import ContainerProperties, BlobProperties, BlobType  # pylint: disable=unused-import
-from ._models import BlobPropertiesPaged, BlobPrefix
-from ._lease_async import BlobLeaseClient
-from ._blob_client_async import BlobClient
-
-if TYPE_CHECKING:
-    from azure.core.pipeline.transport import HttpTransport
-    from azure.core.pipeline.policies import HTTPPolicy
-    from .._models import ContainerSasPermissions, PublicAccess
-    from ._download_async import StorageStreamDownloader
-    from datetime import datetime
-    from .._models import ( # pylint: disable=unused-import
-        AccessPolicy,
-        ContentSettings,
-        StandardBlobTier,
-        PremiumPageBlobTier)
-
-
-class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase):
-    """A client to interact with a specific container, although that container
-    may not yet exist.
-
-    For operations relating to a specific blob within this container, a blob client can be
-    retrieved using the :func:`~get_blob_client` function.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the container,
-        use the :func:`from_container_url` classmethod.
-    :param container_name:
-        The name of the container for the blob.
-    :type container_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.2.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
-        Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be
-        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
-        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
-    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
-        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
-    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
-    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
-    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
-        any portion exceeding this size will be downloaded in chunks (potentially in parallel).
-        Defaults to 32*1024*1024, or 32MB.
-    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
-        or 4MB.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/blob_samples_containers_async.py
-            :start-after: [START create_container_client_from_service]
-            :end-before: [END create_container_client_from_service]
-            :language: python
-            :dedent: 8
-            :caption: Get a ContainerClient from an existing BlobServiceClient.
-
-        .. literalinclude:: ../samples/blob_samples_containers_async.py
-            :start-after: [START create_container_client_sasurl]
-            :end-before: [END create_container_client_sasurl]
-            :language: python
-            :dedent: 12
-            :caption: Creating the container client directly.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            container_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        super(ContainerClient, self).__init__(
-            account_url,
-            container_name=container_name,
-            credential=credential,
-            **kwargs)
-        self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = kwargs.get('loop', None)
-
-    @distributed_trace_async
-    async def create_container(self, metadata=None, public_access=None, **kwargs):
-        # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
-        """
-        Creates a new container under the specified account. If a container
-        with the same name already exists, the operation fails.
-
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            container as metadata. Example: {'Category':'test'}
-        :type metadata: dict[str, str]
-        :param ~azure.storage.blob.PublicAccess public_access:
-            Possible values include: 'container', 'blob'.
-        :keyword container_encryption_scope:
-            Specifies the default encryption scope to set on the container and use for
-            all future writes.
-
-            .. versionadded:: 12.2.0
-
-        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START create_container]
-                :end-before: [END create_container]
-                :language: python
-                :dedent: 16
-                :caption: Creating a container to store blobs.
-        """
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        timeout = kwargs.pop('timeout', None)
-        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
-        try:
-            return await self._client.container.create( # type: ignore
-                timeout=timeout,
-                access=public_access,
-                container_cpk_scope_info=container_cpk_scope_info,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def delete_container(
-            self, **kwargs):
-        # type: (Any) -> None
-        """
-        Marks the specified container for deletion. The container and any blobs
-        contained within it are later deleted during garbage collection.
-
-        :keyword lease:
-            If specified, delete_container only succeeds if the
-            container's lease is active and matches this ID.
-            Required if the container has an active lease.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START delete_container]
-                :end-before: [END delete_container]
-                :language: python
-                :dedent: 16
-                :caption: Delete a container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        mod_conditions = get_modify_conditions(kwargs)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.container.delete(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def acquire_lease(
-            self, lease_duration=-1,  # type: int
-            lease_id=None,  # type: Optional[str]
-            **kwargs):
-        # type: (...) -> BlobLeaseClient
-        """
-        Requests a new lease. If the container does not have an active lease,
-        the Blob service creates a lease on the container and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A BlobLeaseClient object, that can be run in a context manager.
-        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START acquire_lease_on_container]
-                :end-before: [END acquire_lease_on_container]
-                :language: python
-                :dedent: 12
-                :caption: Acquiring a lease on the container.
-        """
-        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
-        return lease
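
# Illustrative sketch: scoping work to a short container lease; the lease is
# released explicitly here (the returned BlobLeaseClient can also be used as a
# context manager, per the docstring above).
async def with_container_lease(container_client):
    lease = await container_client.acquire_lease(lease_duration=15)
    try:
        await container_client.set_container_metadata({"locked": "true"}, lease=lease)
    finally:
        await lease.release()
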
-
-    @distributed_trace_async
-    async def get_account_information(self, **kwargs):
-        # type: (**Any) -> Dict[str, str]
-        """Gets information related to the storage account.
-
-        The information can also be retrieved if the user has a SAS to a container or blob.
-        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
-
-        :returns: A dict of account information (SKU and account type).
-        :rtype: dict(str, str)
-        """
-        try:
-            return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_container_properties(self, **kwargs):
-        # type: (**Any) -> ContainerProperties
-        """Returns all user-defined metadata and system properties for the specified
-        container. The data returned does not include the container's list of blobs.
-
-        :keyword lease:
-            If specified, get_container_properties only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Properties for the specified container within a container object.
-        :rtype: ~azure.storage.blob.ContainerProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START get_container_properties]
-                :end-before: [END get_container_properties]
-                :language: python
-                :dedent: 16
-                :caption: Getting properties on the container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = await self._client.container.get_properties(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                cls=deserialize_container_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        response.name = self.container_name
-        return response # type: ignore
-
-    @distributed_trace_async
-    async def set_container_metadata( # type: ignore
-            self, metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        container. Each call to this operation replaces all existing metadata
-        attached to the container. To remove all metadata from the container,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the container as
-            metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword lease:
-            If specified, set_container_metadata only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Container-updated property dict (Etag and last modified).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START set_container_metadata]
-                :end-before: [END set_container_metadata]
-                :language: python
-                :dedent: 16
-                :caption: Setting metadata on the container.
-        """
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        mod_conditions = get_modify_conditions(kwargs)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return await self._client.container.set_metadata( # type: ignore
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
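
# Illustrative sketch: because each call replaces all existing metadata, adding
# one key requires a read-modify-write (assumes no concurrent writers).
async def add_metadata_key(container_client, key, value):
    props = await container_client.get_container_properties()
    metadata = dict(props.metadata or {})
    metadata[key] = value
    await container_client.set_container_metadata(metadata)
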
-
-    @distributed_trace_async
-    async def get_container_access_policy(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the permissions for the specified container.
-        The permissions indicate whether container data may be accessed publicly.
-
-        :keyword lease:
-            If specified, get_container_access_policy only succeeds if the
-            container's lease is active and matches this ID.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Access policy information in a dict.
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START get_container_access_policy]
-                :end-before: [END get_container_access_policy]
-                :language: python
-                :dedent: 16
-                :caption: Getting the access policy on the container.
-        """
-        lease = kwargs.pop('lease', None)
-        access_conditions = get_access_conditions(lease)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response, identifiers = await self._client.container.get_access_policy(
-                timeout=timeout,
-                lease_access_conditions=access_conditions,
-                cls=return_headers_and_deserialized,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {
-            'public_access': response.get('blob_public_access'),
-            'signed_identifiers': identifiers or []
-        }
-
-    @distributed_trace_async
-    async def set_container_access_policy(
-            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
-            public_access=None,  # type: Optional[Union[str, PublicAccess]]
-            **kwargs  # type: Any
-        ):  # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets the permissions for the specified container or stored access
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether blobs in a container may be accessed publicly.
-
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the container. The
-            dictionary may contain up to 5 elements. An empty dictionary
-            will clear the access policies set on the service.
-        :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
-        :param ~azure.storage.blob.PublicAccess public_access:
-            Possible values include: 'container', 'blob'.
-        :keyword lease:
-            Required if the container has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified date/time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A datetime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Container-updated property dict (Etag and last modified).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START set_container_access_policy]
-                :end-before: [END set_container_access_policy]
-                :language: python
-                :dedent: 16
-                :caption: Setting access policy on the container.
-        """
-        timeout = kwargs.pop('timeout', None)
-        lease = kwargs.pop('lease', None)
-        if len(signed_identifiers) > 5:
-            raise ValueError(
-                'Too many access policies provided. The server does not support setting '
-                'more than 5 access policies on a single resource.')
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
-        signed_identifiers = identifiers # type: ignore
-
-        mod_conditions = get_modify_conditions(kwargs)
-        access_conditions = get_access_conditions(lease)
-        try:
-            return await self._client.container.set_access_policy(
-                container_acl=signed_identifiers or None,
-                timeout=timeout,
-                access=public_access,
-                lease_access_conditions=access_conditions,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
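
# Illustrative sketch: defining a stored access policy named "read-policy".
# AccessPolicy and ContainerSasPermissions are assumed to be re-exported by
# this API version, mirroring azure.storage.blob.
from datetime import datetime, timedelta
from azure.multiapi.storagev2.blob.v2019_07_07 import AccessPolicy, ContainerSasPermissions

async def grant_read_policy(container_client):
    now = datetime.utcnow()
    policy = AccessPolicy(permission=ContainerSasPermissions(read=True),
                          start=now, expiry=now + timedelta(hours=1))
    # At most 5 identifiers are accepted, as validated above.
    await container_client.set_container_access_policy({"read-policy": policy})
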
-
-    @distributed_trace
-    def list_blobs(self, name_starts_with=None, include=None, **kwargs):
-        # type: (Optional[str], Optional[Any], **Any) -> AsyncItemPaged[BlobProperties]
-        """Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service.
-
-        :param str name_starts_with:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param list[str] include:
-            Specifies one or more additional datasets to include in the response.
-            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of BlobProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START list_blobs_in_container]
-                :end-before: [END list_blobs_in_container]
-                :language: python
-                :dedent: 12
-                :caption: List the blobs in the container.
-        """
-        if include and not isinstance(include, list):
-            include = [include]
-
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        command = functools.partial(
-            self._client.container.list_blob_flat_segment,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command,
-            prefix=name_starts_with,
-            results_per_page=results_per_page,
-            page_iterator_class=BlobPropertiesPaged
-        )
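
# Illustrative sketch: 'include' accepts a single string or a list (normalized
# above); this lists soft-deleted blobs alongside live ones.
async def list_with_deleted(container_client):
    async for blob in container_client.list_blobs(include=["deleted", "metadata"]):
        print(blob.name, blob.deleted)
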
-
-    @distributed_trace
-    def walk_blobs(
-            self, name_starts_with=None, # type: Optional[str]
-            include=None, # type: Optional[Any]
-            delimiter="/", # type: str
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> AsyncItemPaged[BlobProperties]
-        """Returns a generator to list the blobs under the specified container.
-        The generator will lazily follow the continuation tokens returned by
-        the service. This operation will list blobs in accordance with a hierarchy,
-        as delimited by the specified delimiter character.
-
-        :param str name_starts_with:
-            Filters the results to return only blobs whose names
-            begin with the specified prefix.
-        :param list[str] include:
-            Specifies one or more additional datasets to include in the response.
-            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
-        :param str delimiter:
-            When the request includes this parameter, the operation returns a BlobPrefix
-            element in the response body that acts as a placeholder for all blobs whose
-            names begin with the same substring up to the appearance of the delimiter
-            character. The delimiter may be a single character or a string.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of BlobProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
-        """
-        if include and not isinstance(include, list):
-            include = [include]
-
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        command = functools.partial(
-            self._client.container.list_blob_hierarchy_segment,
-            delimiter=delimiter,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return BlobPrefix(
-            command,
-            prefix=name_starts_with,
-            results_per_page=results_per_page,
-            delimiter=delimiter)
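
# Illustrative sketch: walking the "/"-delimited hierarchy recursively.
# BlobPrefix nodes (virtual directories) are detected here by their 'prefix'
# attribute, which plain BlobProperties items lack.
async def walk(container_client, prefix=None, indent=0):
    async for item in container_client.walk_blobs(name_starts_with=prefix):
        print(" " * indent + item.name)
        if hasattr(item, "prefix"):
            await walk(container_client, prefix=item.name, indent=indent + 2)
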
-
-    @distributed_trace_async
-    async def upload_blob(
-            self, name,  # type: Union[str, BlobProperties]
-            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
-            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
-            length=None,  # type: Optional[int]
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> BlobClient
-        """Creates a new blob from a data source with automatic chunking.
-
-        :param name: The blob with which to interact. If specified, this value will override
-            a blob value specified in the blob URL.
-        :type name: str or ~azure.storage.blob.BlobProperties
-        :param data: The blob data to upload.
-        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
-            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
-            If True, upload_blob will overwrite the existing data. If set to False, the
-            operation will fail with ResourceExistsError. The exception to the above is with Append
-            blob types: if set to False and the data already exists, an error will not be raised
-            and the data will be appended to the existing blob. If overwrite=True is set, the existing
-            append blob will be deleted and a new one created. Defaults to False.
-        :keyword ~azure.storage.blob.ContentSettings content_settings:
-            ContentSettings object used to set blob properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived against the hash
-            that was sent. This is primarily valuable for detecting bit flips on
-            the wire if using http instead of https, as https (the default) already
-            validates. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the container has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
-            A standard blob tier value to set the blob to. For this version of the library,
-            this is only applicable to block blobs on standard storage accounts.
-        :keyword int maxsize_condition:
-            Optional conditional header. The max length in bytes permitted for
-            the append blob. If the Append Block operation would cause the blob
-            to exceed that limit or if the blob size is already greater than the
-            value specified in this header, the request will fail with
-            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use when the blob size exceeds
-            64MB.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword str encryption_scope:
-            A predefined encryption scope used to encrypt the data on the service. An encryption
-            scope can be created using the Management API and referenced here by name. If a default
-            encryption scope has been defined at the container, this value will override it if the
-            container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
-            .. versionadded:: 12.2.0
-
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: A BlobClient to interact with the newly uploaded blob.
-        :rtype: ~azure.storage.blob.aio.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START upload_blob_to_container]
-                :end-before: [END upload_blob_to_container]
-                :language: python
-                :dedent: 12
-                :caption: Upload blob to the container.
-        """
-        blob = self.get_blob_client(name)
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        await blob.upload_blob(
-            data,
-            blob_type=blob_type,
-            length=length,
-            metadata=metadata,
-            timeout=timeout,
-            encoding=encoding,
-            **kwargs
-        )
-        return blob
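For reference, a minimal usage sketch of the async upload_blob above, assuming a valid connection string and an existing container (the connection string and all names here are hypothetical):

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        # Hypothetical connection string and container name.
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")
        async with container:
            # Returns a BlobClient pointing at the newly uploaded blob.
            blob = await container.upload_blob("hello.txt", b"hello world")
            print(blob.blob_name)

    asyncio.run(main())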
-
-    @distributed_trace_async
-    async def delete_blob(
-            self, blob,  # type: Union[str, BlobProperties]
-            delete_snapshots=None,  # type: Optional[str]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified blob or snapshot for deletion.
-
-        The blob is later deleted during garbage collection.
-        Note that in order to delete a blob, you must delete all of its
-        snapshots. You can delete both at the same time with the delete_blob
-        operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
-        and retains it for the specified number of days.
-        After that period, the blob's data is removed from the service during garbage collection.
-        A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` with the `include=["deleted"]`
-        option, and can be restored using :func:`~BlobClient.undelete()`.
-
-        :param blob: The blob with which to interact. If specified, this value will override
-            a blob value specified in the blob URL.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param str delete_snapshots:
-            Required if the blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a Lease object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        blob = self.get_blob_client(blob) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        timeout = kwargs.pop('timeout', None)
-        await blob.delete_blob( # type: ignore
-            delete_snapshots=delete_snapshots,
-            timeout=timeout,
-            **kwargs)
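A short sketch of delete_blob with snapshot handling, under the same illustrative setup as the upload sketch above:

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")  # hypothetical values
        async with container:
            # "include" removes the blob together with all of its snapshots;
            # required when snapshots exist, otherwise the service rejects the delete.
            await container.delete_blob("hello.txt", delete_snapshots="include")

    asyncio.run(main())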
-
-    @distributed_trace_async
-    async def download_blob(self, blob, offset=None, length=None, **kwargs):
-        # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader
-        """Downloads a blob to the StorageStreamDownloader. The readall() method must
-        be used to read all the content or readinto() must be used to download the blob into
-        a stream.
-
-        :param blob: The blob with which to interact. If specified, this value will override
-            a blob value specified in the blob URL.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param int offset:
-            Start of byte range to use for downloading a section of the blob.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the blob. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            blob. Also note that if enabled, the memory-efficient download algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the blob has an active lease. If specified, download_blob only
-            succeeds if the blob's lease is active and matches this ID. Value can be a
-            BlobLeaseClient object or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
-            Encrypts the data on the service-side with the given key.
-            Use of customer-provided keys must be done over HTTPS.
-            As the encryption key itself is provided in the request,
-            a secure connection must be established to transfer the key.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword str encoding:
-            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: A streaming object (StorageStreamDownloader).
-        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
-        """
-        blob_client = self.get_blob_client(blob) # type: ignore
-        kwargs.setdefault('merge_span', True)
-        return await blob_client.download_blob(
-            offset=offset,
-            length=length,
-            **kwargs)
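A minimal sketch of a ranged download through this method (illustrative names; omit offset and length to download the whole blob):

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")  # hypothetical values
        async with container:
            # Download only bytes 0-99 of the blob.
            downloader = await container.download_blob("hello.txt", offset=0, length=100)
            data = await downloader.readall()
            print(len(data))

    asyncio.run(main())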
-
-    @distributed_trace_async
-    async def delete_blobs(  # pylint: disable=arguments-differ
-            self, *blobs: Union[str, BlobProperties],
-            delete_snapshots: Optional[str] = None,
-            lease: Optional[Union[str, BlobLeaseClient]] = None,
-            **kwargs
-        ) -> AsyncIterator[AsyncHttpResponse]:
-        """Marks the specified blobs or snapshots for deletion.
-
-        The blobs are later deleted during garbage collection.
-        Note that in order to delete blobs, you must delete all of their
-        snapshots. You can delete both at the same time with the delete_blobs operation.
-
-        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
-        and retains them for the specified number of days.
-        After that period, the blobs' data is removed from the service during garbage collection.
-        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` with the `include=["deleted"]`
-        option, and can be restored using :func:`~BlobClient.undelete()`.
-
-        :param blobs: The blob names with which to interact. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :param str delete_snapshots:
-            Required if a blob has associated snapshots. Values include:
-             - "only": Deletes only the blobs snapshots.
-             - "include": Deletes the blob along with all snapshots.
-        :param lease:
-            Required if a blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword bool raise_on_any_failure:
-            A boolean flag that defaults to True. When set, an exception is raised
-            even if a single operation fails. For optimal performance,
-            this should be set to False.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: An async iterator of responses, one for each blob, in order.
-        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common_async.py
-                :start-after: [START delete_multiple_blobs]
-                :end-before: [END delete_multiple_blobs]
-                :language: python
-                :dedent: 12
-                :caption: Deleting multiple blobs.
-        """
-        raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
-        timeout = kwargs.pop('timeout', None)
-        options = BlobClient._generic_delete_blob_options(  # pylint: disable=protected-access
-            delete_snapshots=delete_snapshots,
-            lease=lease,
-            timeout=timeout,
-            **kwargs
-        )
-        options.update({'raise_on_any_failure': raise_on_any_failure})
-        query_parameters, header_parameters = self._generate_delete_blobs_options(**options)
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature, otherwise the transport will be upset.
-        for possible_param in ['timeout', 'delete_snapshots', 'lease_access_conditions', 'modified_access_conditions']:
-            options.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "DELETE",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return await self._batch_send(*reqs, **options)
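A sketch of the batch delete above; with raise_on_any_failure=False the per-blob outcomes surface through the returned responses instead of an exception (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")  # hypothetical values
        async with container:
            # All three deletes travel in a single batch request.
            responses = await container.delete_blobs(
                "a.txt", "b.txt", "c.txt", raise_on_any_failure=False)
            async for response in responses:
                print(response.status_code)  # e.g. 202 per successful delete

    asyncio.run(main())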
-
-    @distributed_trace
-    async def set_standard_blob_tier_blobs(
-        self,
-        standard_blob_tier: Union[str, 'StandardBlobTier'],
-        *blobs: Union[str, BlobProperties],
-        **kwargs
-    ) -> AsyncIterator[AsyncHttpResponse]:
-        """This operation sets the tier on block blobs.
-
-        A block blob's tier determines Hot/Cool/Archive storage type.
-        This operation does not update the blob's ETag.
-
-        :param standard_blob_tier:
-            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
-            'Archive'. The hot tier is optimized for storing data that is accessed
-            frequently. The cool storage tier is optimized for storing data that
-            is infrequently accessed and stored for at least a month. The archive
-            tier is optimized for storing data that is rarely accessed and stored
-            for at least six months with flexible latency requirements.
-        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
-        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword bool raise_on_any_failure:
-            A boolean flag that defaults to True. When set, an exception is raised
-            even if a single operation fails. For optimal performance,
-            this should be set to False.
-        :return: An async iterator of responses, one for each blob, in order.
-        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if standard_blob_tier is None:
-            raise ValueError("A StandardBlobTier must be specified")
-
-        query_parameters, header_parameters = self._generate_set_tier_options(
-            tier=standard_blob_tier,
-            lease_access_conditions=access_conditions,
-            **kwargs
-        )
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature, otherwise the transport will be upset.
-        for possible_param in ['timeout', 'lease']:
-            kwargs.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "PUT",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return await self._batch_send(*reqs, **kwargs)
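A sketch of batch-tiering block blobs; set_premium_page_blob_tier_blobs below follows the same pattern for page blobs on premium accounts (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")  # hypothetical values
        async with container:
            # Move several block blobs to the Cool tier in one batch call.
            responses = await container.set_standard_blob_tier_blobs(
                "Cool", "a.txt", "b.txt")
            async for response in responses:
                print(response.status_code)

    asyncio.run(main())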
-
-    @distributed_trace
-    async def set_premium_page_blob_tier_blobs(
-        self,
-        premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
-        *blobs: Union[str, BlobProperties],
-        **kwargs
-    ) -> AsyncIterator[AsyncHttpResponse]:
-        """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
-
-        :param premium_page_blob_tier:
-            A page blob tier value to set the blob to. The tier correlates to the size of the
-            blob and number of allowed IOPS. This is only applicable to page blobs on
-            premium storage accounts.
-        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
-        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
-            be supplied, where each value is either the name of the blob (str) or BlobProperties.
-        :type blobs: str or ~azure.storage.blob.BlobProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
-        :keyword bool raise_on_any_failure:
-            A boolean flag that defaults to True. When set, an exception is raised
-            even if a single operation fails. For optimal performance,
-            this should be set to False.
-        :return: An async iterator of responses, one for each blob, in order.
-        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        if premium_page_blob_tier is None:
-            raise ValueError("A PremiumPageBlobTier must be specified")
-
-        query_parameters, header_parameters = self._generate_set_tier_options(
-            tier=premium_page_blob_tier,
-            lease_access_conditions=access_conditions,
-            **kwargs
-        )
-        # To pass kwargs to "_batch_send", we need to remove anything that was
-        # in the Autorest signature, otherwise the transport will be upset.
-        for possible_param in ['timeout', 'lease']:
-            kwargs.pop(possible_param, None)
-
-        reqs = []
-        for blob in blobs:
-            blob_name = _get_blob_name(blob)
-            req = HttpRequest(
-                "PUT",
-                "/{}/{}".format(self.container_name, blob_name),
-                headers=header_parameters
-            )
-            req.format_parameters(query_parameters)
-            reqs.append(req)
-
-        return await self._batch_send(*reqs, **kwargs)
-
-    def get_blob_client(
-            self, blob,  # type: Union[BlobProperties, str]
-            snapshot=None  # type: str
-        ):
-        # type: (...) -> BlobClient
-        """Get a client to interact with the specified blob.
-
-        The blob need not already exist.
-
-        :param blob:
-            The blob with which to interact.
-        :type blob: str or ~azure.storage.blob.BlobProperties
-        :param str snapshot:
-            The optional blob snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`~BlobClient.create_snapshot()`.
-        :returns: A BlobClient.
-        :rtype: ~azure.storage.blob.aio.BlobClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_containers_async.py
-                :start-after: [START get_blob_client]
-                :end-before: [END get_blob_client]
-                :language: python
-                :dedent: 12
-                :caption: Get the blob client.
-        """
-        blob_name = _get_blob_name(blob)
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return BlobClient(
-            self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
-            credential=self.credential, api_version=self.api_version, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function, loop=self._loop)
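A sketch of get_blob_client; the returned client reuses the container client's pipeline, so it is cheap to create (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            "<connection-string>", container_name="mycontainer")  # hypothetical values
        async with container:
            # The blob does not need to exist yet.
            blob_client = container.get_blob_client("hello.txt")
            print(blob_client.url)

    asyncio.run(main())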
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_download_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_download_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_download_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_download_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,490 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import sys
-from io import BytesIO
-from itertools import islice
-import warnings
-
-from azure.core.exceptions import HttpResponseError
-from .._shared.encryption import decrypt_blob
-from .._shared.request_handlers import validate_and_format_range_headers
-from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
-from .._deserialize import get_page_ranges_result
-from .._download import process_range_and_offset, _ChunkDownloader
-
-
-async def process_content(data, start_offset, end_offset, encryption):
-    if data is None:
-        raise ValueError("Response cannot be None.")
-    try:
-        content = data.response.body()
-    except Exception as error:
-        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
-    if encryption.get('key') is not None or encryption.get('resolver') is not None:
-        try:
-            return decrypt_blob(
-                encryption.get('required'),
-                encryption.get('key'),
-                encryption.get('resolver'),
-                content,
-                start_offset,
-                end_offset,
-                data.response.headers)
-        except Exception as error:
-            raise HttpResponseError(
-                message="Decryption failed.",
-                response=data.response,
-                error=error)
-    return content
-
-
-class _AsyncChunkDownloader(_ChunkDownloader):
-    def __init__(self, **kwargs):
-        super(_AsyncChunkDownloader, self).__init__(**kwargs)
-        self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
-        self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None
-
-    async def process_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
-        length = chunk_end - chunk_start
-        if length > 0:
-            await self._write_to_stream(chunk_data, chunk_start)
-            await self._update_progress(length)
-
-    async def yield_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        return await self._download_chunk(chunk_start, chunk_end - 1)
-
-    async def _update_progress(self, length):
-        if self.progress_lock:
-            async with self.progress_lock:  # pylint: disable=not-async-context-manager
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _write_to_stream(self, chunk_data, chunk_start):
-        if self.stream_lock:
-            async with self.stream_lock:  # pylint: disable=not-async-context-manager
-                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-                self.stream.write(chunk_data)
-        else:
-            self.stream.write(chunk_data)
-
-    async def _download_chunk(self, chunk_start, chunk_end):
-        download_range, offset = process_range_and_offset(
-            chunk_start, chunk_end, chunk_end, self.encryption_options)
-
-        # No need to download an empty chunk from the server if there is no data in it;
-        # instead, optimize by creating the empty chunk locally when that condition is met.
-        if self._do_optimize(download_range[0], download_range[1]):
-            chunk_data = b"\x00" * self.chunk_size
-        else:
-            range_header, range_validation = validate_and_format_range_headers(
-                download_range[0],
-                download_range[1],
-                check_content_md5=self.validate_content
-            )
-            try:
-                _, response = await self.client.download(
-                    range=range_header,
-                    range_get_content_md5=range_validation,
-                    validate_content=self.validate_content,
-                    data_stream_total=self.total_size,
-                    download_stream_current=self.progress_total,
-                    **self.request_options
-                )
-            except HttpResponseError as error:
-                process_storage_error(error)
-
-            chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
-
-            # This makes sure that if_match is set so that we can validate
-            # that subsequent downloads are to an unmodified blob
-            if self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = response.properties.etag
-
-        return chunk_data
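The chunk downloader above walks the remaining byte range in fixed-size chunks and requests each one as an inclusive HTTP range. A standalone sketch of the same arithmetic (not the SDK's internal helper, just an illustration):

    def chunk_ranges(start, end, chunk_size):
        """Yield inclusive (first, last) byte pairs covering [start, end)."""
        for first in range(start, end, chunk_size):
            yield first, min(first + chunk_size, end) - 1

    # A 10 MiB blob fetched in 4 MiB chunks after an initial 4 MiB GET:
    # yields (4194304, 8388607) and (8388608, 10485759).
    for first, last in chunk_ranges(4 * 1024 * 1024, 10 * 1024 * 1024, 4 * 1024 * 1024):
        print(first, last)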
-
-
-class _AsyncChunkIterator(object):
-    """Async iterator for chunks in blob download stream."""
-
-    def __init__(self, size, content, downloader):
-        self.size = size
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        raise TypeError("Async stream must be iterated asynchronously.")
-
-    def __aiter__(self):
-        return self
-
-    async def __anext__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopAsyncIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            try:
-                chunk = next(self._iter_chunks)
-            except StopIteration:
-                raise StopAsyncIteration("Download complete")
-            self._current_content = await self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the blob being downloaded.
-    :ivar str container:
-        The name of the container where the blob is.
-    :ivar ~azure.storage.blob.BlobProperties properties:
-        The properties of the blob being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the blob.
-    """
-
-    def __init__(
-            self,
-            clients=None,
-            config=None,
-            start_range=None,
-            end_range=None,
-            validate_content=None,
-            encryption_options=None,
-            max_concurrency=1,
-            name=None,
-            container=None,
-            encoding=None,
-            **kwargs
-    ):
-        self.name = name
-        self.container = container
-        self.properties = None
-        self.size = None
-
-        self._clients = clients
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._non_empty_ranges = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
-        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
-            else self._config.max_chunk_get_size
-        initial_request_start = self._start_range if self._start_range is not None else 0
-        if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
-            initial_request_end = self._end_range
-        else:
-            initial_request_end = initial_request_start + self._first_get_size - 1
-
-        self._initial_range, self._initial_offset = process_range_and_offset(
-            initial_request_start, initial_request_end, self._end_range, self._encryption_options
-        )
-
-    def __len__(self):
-        return self.size
-
-    async def _setup(self):
-        self._response = await self._initial_request()
-        self.properties = self._response.properties
-        self.properties.name = self.name
-        self.properties.container = self.container
-
-        # Set the content length to the download size instead of the size of
-        # the last range
-        self.properties.size = self.size
-
-        # Overwrite the content range to the user requested range
-        self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
-            self._start_range,
-            self._end_range,
-            self._file_size
-        )
-
-        # Overwrite the content MD5 as it is the MD5 for the last range instead
-        # of the stored MD5
-        # TODO: Set to the stored MD5 when the service returns this
-        self.properties.content_md5 = None
-
-        if self.size == 0:
-            self._current_content = b""
-        else:
-            self._current_content = await process_content(
-                self._response,
-                self._initial_offset[0],
-                self._initial_offset[1],
-                self._encryption_options
-            )
-
-    async def _initial_request(self):
-        range_header, range_validation = validate_and_format_range_headers(
-            self._initial_range[0],
-            self._initial_range[1],
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=self._validate_content)
-
-        try:
-            location_mode, response = await self._clients.blob.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self._validate_content,
-                data_stream_total=None,
-                download_stream_current=0,
-                **self._request_options)
-
-            # Check the location we read from to ensure we use the same one
-            # for subsequent requests.
-            self._location_mode = location_mode
-
-            # Parse the total file size and adjust the download size if ranges
-            # were specified
-            self._file_size = parse_length_from_content_range(response.properties.content_range)
-            if self._end_range is not None:
-                # Use the length unless it is over the end of the file
-                self.size = min(self._file_size, self._end_range - self._start_range + 1)
-            elif self._start_range is not None:
-                self.size = self._file_size - self._start_range
-            else:
-                self.size = self._file_size
-
-        except HttpResponseError as error:
-            if self._start_range is None and error.response.status_code == 416:
-                # Get range will fail on an empty file. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                try:
-                    _, response = await self._clients.blob.download(
-                        validate_content=self._validate_content,
-                        data_stream_total=0,
-                        download_stream_current=0,
-                        **self._request_options)
-                except HttpResponseError as error:
-                    process_storage_error(error)
-
-                # Set the download size to empty
-                self.size = 0
-                self._file_size = 0
-            else:
-                process_storage_error(error)
-
-        # get page ranges to optimize downloading sparse page blob
-        if response.properties.blob_type == 'PageBlob':
-            try:
-                page_ranges = await self._clients.page_blob.get_page_ranges()
-                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
-            except HttpResponseError:
-                pass
-
-        # If the file is small, the download is complete at this point.
-        # If file size is large, download the rest of the file in chunks.
-        if response.properties.size != self.size:
-            # Lock on the etag. This can be overridden by the user by specifying '*'
-            if self._request_options.get('modified_access_conditions'):
-                if not self._request_options['modified_access_conditions'].if_match:
-                    self._request_options['modified_access_conditions'].if_match = response.properties.etag
-        else:
-            self._download_complete = True
-        return response
-
-    def chunks(self):
-        """Iterate over chunks in the download stream.
-
-        :rtype: AsyncIterable[bytes]
-        """
-        if self.size == 0 or self._download_complete:
-            iter_downloader = None
-        else:
-            data_end = self._file_size
-            if self._end_range is not None:
-                # Use the length unless it is over the end of the file
-                data_end = min(self._file_size, self._end_range + 1)
-            iter_downloader = _AsyncChunkDownloader(
-                client=self._clients.blob,
-                non_empty_ranges=self._non_empty_ranges,
-                total_size=self.size,
-                chunk_size=self._config.max_chunk_get_size,
-                current_progress=self._first_get_size,
-                start_range=self._initial_range[1] + 1,  # Start where the first download ended
-                end_range=data_end,
-                stream=None,
-                parallel=False,
-                validate_content=self._validate_content,
-                encryption_options=self._encryption_options,
-                use_location=self._location_mode,
-                **self._request_options)
-        return _AsyncChunkIterator(
-            size=self.size,
-            content=self._current_content,
-            downloader=iter_downloader)
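A sketch of consuming a download as a lazy chunk stream instead of one buffer; the first chunk served is the data already fetched by the initial GET (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer",
            blob_name="big.bin")  # hypothetical values
        async with blob:
            downloader = await blob.download_blob()
            async for chunk in downloader.chunks():
                print(len(chunk))  # process each chunk as it arrives

    asyncio.run(main())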
-
-    async def readall(self):
-        """Download the contents of this blob.
-
-        This operation is blocking until all data is downloaded.
-        :rtype: bytes or str
-        """
-        stream = BytesIO()
-        await self.readinto(stream)
-        data = stream.getvalue()
-        if self._encoding:
-            return data.decode(self._encoding)
-        return data
-
-    async def content_as_bytes(self, max_concurrency=1):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :param int max_concurrency:
-            The number of parallel connections with which to download.
-        :rtype: bytes
-        """
-        warnings.warn(
-            "content_as_bytes is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        return await self.readall()
-
-    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
-        """Download the contents of this blob, and decode as text.
-
-        This operation is blocking until all data is downloaded.
-
-        :param int max_concurrency:
-            The number of parallel connections with which to download.
-        :param str encoding:
-            Text encoding to decode the downloaded bytes. Default is UTF-8.
-        :rtype: str
-        """
-        warnings.warn(
-            "content_as_text is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        return await self.readall()
-
-    async def readinto(self, stream):
-        """Download the contents of this blob to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        # the stream must be seekable if parallel download is required
-        parallel = self._max_concurrency > 1
-        if parallel:
-            error_message = "Target stream handle must be seekable."
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(error_message)
-
-            try:
-                stream.seek(stream.tell())
-            except (NotImplementedError, AttributeError):
-                raise ValueError(error_message)
-
-        # Write the content to the user stream
-        stream.write(self._current_content)
-        if self._download_complete:
-            return self.size
-
-        data_end = self._file_size
-        if self._end_range is not None:
-            # Use the length unless it is over the end of the file
-            data_end = min(self._file_size, self._end_range + 1)
-
-        downloader = _AsyncChunkDownloader(
-            client=self._clients.blob,
-            non_empty_ranges=self._non_empty_ranges,
-            total_size=self.size,
-            chunk_size=self._config.max_chunk_get_size,
-            current_progress=self._first_get_size,
-            start_range=self._initial_range[1] + 1,  # start where the first download ended
-            end_range=data_end,
-            stream=stream,
-            parallel=parallel,
-            validate_content=self._validate_content,
-            encryption_options=self._encryption_options,
-            use_location=self._location_mode,
-            **self._request_options)
-
-        dl_tasks = downloader.get_chunk_offsets()
-        running_futures = [
-            asyncio.ensure_future(downloader.process_chunk(d))
-            for d in islice(dl_tasks, 0, self._max_concurrency)
-        ]
-        while running_futures:
-            # Wait for some download to finish before adding a new one
-            _done, running_futures = await asyncio.wait(
-                running_futures, return_when=asyncio.FIRST_COMPLETED)
-            try:
-                next_chunk = next(dl_tasks)
-            except StopIteration:
-                break
-            else:
-                running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
-
-        if running_futures:
-            # Wait for the remaining downloads to finish
-            await asyncio.wait(running_futures)
-        return self.size
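A sketch of streaming a download into a local file; the target stream must be seekable when max_concurrency is greater than one, because parallel chunks may complete out of order (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer",
            blob_name="big.bin")  # hypothetical values
        async with blob:
            downloader = await blob.download_blob(max_concurrency=4)
            with open("big.bin", "wb") as stream:  # regular files are seekable
                bytes_read = await downloader.readinto(stream)
            print(bytes_read)

    asyncio.run(main())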
-
-    async def download_to_stream(self, stream, max_concurrency=1):
-        """Download the contents of this blob to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The properties of the downloaded blob.
-        :rtype: Any
-        """
-        warnings.warn(
-            "download_to_stream is deprecated, use readinto instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        await self.readinto(stream)
-        return self.properties
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_lease_async.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_lease_async.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_lease_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_lease_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,296 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TypeVar, TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._generated.models import (
-    StorageErrorException,
-    LeaseAccessConditions)
-from .._serialize import get_modify_conditions
-from .._lease import BlobLeaseClient as LeaseClientBase
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from .._generated.operations import BlobOperations, ContainerOperations
-    BlobClient = TypeVar("BlobClient")
-    ContainerClient = TypeVar("ContainerClient")
-
-
-class BlobLeaseClient(LeaseClientBase):
-    """Creates a new BlobLeaseClient.
-
-    This client provides lease operations on a BlobClient or ContainerClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the blob or container to lease.
-    :type client: ~azure.storage.blob.aio.BlobClient or
-        ~azure.storage.blob.aio.ContainerClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-
-    def __enter__(self):
-        raise TypeError("Async lease must use 'async with'.")
-
-    def __exit__(self, *args):
-        self.release()
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(self, *args):
-        await self.release()
-
-    @distributed_trace_async
-    async def acquire(self, lease_duration=-1, **kwargs):
-        # type: (int, Any) -> None
-        """Requests a new lease.
-
-        If the container does not have an active lease, the Blob service creates a
-        lease on the container and returns a new lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = await self._client.acquire_lease(
-                timeout=kwargs.pop('timeout', None),
-                duration=lease_duration,
-                proposed_lease_id=self.id,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-        self.etag = response.get('etag')  # type: str
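A sketch of the typical lease flow; acquire_lease on the async BlobClient returns a BlobLeaseClient like the one defined in this module (names are illustrative):

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer",
            blob_name="hello.txt")  # hypothetical values
        async with blob:
            # A finite lease must be 15-60 seconds; -1 means infinite.
            lease = await blob.acquire_lease(lease_duration=15)
            try:
                # While leased, mutating operations must present the lease.
                await blob.set_blob_metadata({"state": "locked"}, lease=lease)
            finally:
                await lease.release()

    asyncio.run(main())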
-
-    @distributed_trace_async
-    async def renew(self, **kwargs):
-        # type: (Any) -> None
-        """Renews the lease.
-
-        The lease can be renewed if the lease ID specified in the
-        lease client matches that associated with the container or blob. Note that
-        the lease may be renewed even if it has expired as long as the container
-        or blob has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = await self._client.renew_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace_async
-    async def release(self, **kwargs):
-        # type: (Any) -> None
-        """Release the lease.
-
-        The lease may be released if the lease ID specified by the client matches
-        that associated with the container or blob. Releasing the lease allows another client
-        to immediately acquire the lease for the container or blob as soon as the release is complete.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = await self._client.release_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace_async
-    async def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """Change the lease ID of an active lease.
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = await self._client.change_lease(
-                lease_id=self.id,
-                proposed_lease_id=proposed_lease_id,
-                timeout=kwargs.pop('timeout', None),
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace_async
-    async def break_lease(self, lease_break_period=None, **kwargs):
-        # type: (Optional[int], Any) -> int
-        """Break the lease, if the container or blob has an active lease.
-
-        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container or blob.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :param int lease_break_period:
-            This is the proposed duration, in seconds, that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        mod_conditions = get_modify_conditions(kwargs)
-        try:
-            response = await self._client.break_lease(
-                timeout=kwargs.pop('timeout', None),
-                break_period=lease_break_period,
-                modified_access_conditions=mod_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response.get('lease_time') # type: ignore
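For context on how this removed async lease surface was consumed: the sketch below acquires a 15-second lease on a blob and then breaks it, reading back the remaining break interval that `break_lease` returns. It is illustrative only; the connection string, container, and blob names are placeholders, and `acquire_lease` is assumed from the vendored 12.x blob client that this version directory mirrored.

    import asyncio

    from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient

    async def demo():
        blob = BlobClient.from_connection_string(
            "<connection-string>",            # placeholder
            container_name="mycontainer",     # placeholder
            blob_name="myblob")               # placeholder
        async with blob:
            lease = await blob.acquire_lease(lease_duration=15)
            # break_lease returns the approximate seconds until the lease
            # can be re-acquired (see the removed method above).
            remaining = await lease.break_lease(lease_break_period=5)
            print("lease can be re-acquired in ~%d s" % remaining)

    asyncio.run(demo())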
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_models.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,226 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-
-from typing import List, Any, TYPE_CHECKING # pylint: disable=unused-import
-
-from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged
-
-from .._models import BlobProperties, ContainerProperties
-from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
-from .._shared.models import DictMixin
-
-from .._generated.models import StorageErrorException
-from .._generated.models import BlobPrefix as GenBlobPrefix
-from .._generated.models import BlobItem
-
-
-class ContainerPropertiesPaged(AsyncPageIterator):
-    """An Iterable of Container properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A container name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.models.ContainerProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only containers whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of container names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(ContainerPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [self._build_item(item) for item in self._response.container_items]
-
-        return self._response.next_marker or None, self.current_page
-
-    @staticmethod
-    def _build_item(item):
-        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
-
-
-class BlobPropertiesPaged(AsyncPageIterator):
-    """An Iterable of Blob properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A blob name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.models.BlobProperties)
-    :ivar str container: The container that the blobs are listed from.
-    :ivar str delimiter: A delimiting character used for hierarchy listing.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str container: The container that the blobs are listed from.
-    :param str prefix: Filters the results to return only blobs whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of blobs to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    :param str delimiter:
-        Used to capture blobs whose names begin with the same substring up to
-        the appearance of the delimiter character. The delimiter may be a single
-        character or a string.
-    :param location_mode: Specifies the location the request should be sent to.
-        This mode only applies for RA-GRS accounts which allow secondary read access.
-        Options include 'primary' or 'secondary'.
-    """
-    def __init__(
-            self, command,
-            container=None,
-            prefix=None,
-            results_per_page=None,
-            continuation_token=None,
-            delimiter=None,
-            location_mode=None):
-        super(BlobPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.container = container
-        self.delimiter = delimiter
-        self.current_page = None
-        self.location_mode = location_mode
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                prefix=self.prefix,
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.container = self._response.container_name
-        self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
-
-        return self._response.next_marker or None, self.current_page
-
-    def _build_item(self, item):
-        if isinstance(item, BlobProperties):
-            return item
-        if isinstance(item, BlobItem):
-            blob = BlobProperties._from_generated(item)  # pylint: disable=protected-access
-            blob.container = self.container
-            return blob
-        return item
-
-
-class BlobPrefix(AsyncItemPaged, DictMixin):
-    """An Iterable of Blob properties.
-
-    Returned from walk_blobs when a delimiter is used.
-    Can be thought of as a virtual blob directory.
-
-    :ivar str name: The prefix, or "directory name" of the blob.
-    :ivar str prefix: A blob name prefix being used to filter the list.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.blob.models.BlobProperties)
-    :ivar str container: The container that the blobs are listed from.
-    :ivar str delimiter: A delimiting character used for hierarchy listing.
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only blobs whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of blobs to retrieve per
-        call.
-    :param str marker: An opaque continuation token.
-    :param str delimiter:
-        Used to capture blobs whose names begin with the same substring up to
-        the appearance of the delimiter character. The delimiter may be a single
-        character or a string.
-    :param location_mode: Specifies the location the request should be sent to.
-        This mode only applies for RA-GRS accounts which allow secondary read access.
-        Options include 'primary' or 'secondary'.
-    """
-    def __init__(self, *args, **kwargs):
-        super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
-        self.name = kwargs.get('prefix')
-        self.prefix = kwargs.get('prefix')
-        self.results_per_page = kwargs.get('results_per_page')
-        self.container = kwargs.get('container')
-        self.delimiter = kwargs.get('delimiter')
-        self.location_mode = kwargs.get('location_mode')
-
-
-class BlobPrefixPaged(BlobPropertiesPaged):
-    def __init__(self, *args, **kwargs):
-        super(BlobPrefixPaged, self).__init__(*args, **kwargs)
-        self.name = self.prefix
-
-    async def _extract_data_cb(self, get_next_return):
-        continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
-        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
-        self.current_page = [self._build_item(item) for item in self.current_page]
-        self.delimiter = self._response.delimiter
-
-        return continuation_token, self.current_page
-
-    def _build_item(self, item):
-        item = super(BlobPrefixPaged, self)._build_item(item)
-        if isinstance(item, GenBlobPrefix):
-            return BlobPrefix(
-                self._command,
-                container=self.container,
-                prefix=item.name,
-                results_per_page=self.results_per_page,
-                location_mode=self.location_mode)
-        return item
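The pagers deleted above wire a generated-client call into azure-core's async paging protocol: `_get_next_cb` fetches one raw page using the continuation marker, and `_extract_data_cb` converts it into model objects plus the next token. From user code they were reached through the service client's listing methods; a minimal consumption sketch follows (the connection string and prefix are placeholders, and `BlobServiceClient`/`list_containers` are assumed from the vendored 12.x surface):

    import asyncio

    from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobServiceClient

    async def demo():
        service = BlobServiceClient.from_connection_string("<connection-string>")
        async with service:
            # Flat iteration: AsyncItemPaged drives ContainerPropertiesPaged
            # underneath and follows continuation tokens transparently.
            async for container in service.list_containers(name_starts_with="logs-"):
                print(container.name)

            # Page-by-page iteration exposes the AsyncPageIterator directly.
            async for page in service.list_containers(results_per_page=50).by_page():
                async for container in page:
                    print(container.last_modified)

    asyncio.run(demo())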
diff -pruN 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_upload_helpers.py 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_upload_helpers.py
--- 1.4.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_upload_helpers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/blob/v2019_07_07/aio/_upload_helpers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,256 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from io import SEEK_SET, UnsupportedOperation
-from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import
-
-import six
-from azure.core.exceptions import ResourceModifiedError
-
-from .._shared.response_handlers import (
-    process_storage_error,
-    return_response_headers)
-from .._shared.uploads_async import (
-    upload_data_chunks,
-    upload_substream_blocks,
-    BlockBlobChunkUploader,
-    PageBlobChunkUploader,
-    AppendBlobChunkUploader)
-from .._shared.encryption import generate_blob_encryption_data, encrypt_blob
-from .._generated.models import (
-    StorageErrorException,
-    BlockLookupList,
-    AppendPositionAccessConditions,
-    ModifiedAccessConditions,
-)
-from .._upload_helpers import _convert_mod_error, _any_conditions
-
-if TYPE_CHECKING:
-    from datetime import datetime # pylint: disable=unused-import
-    BlobLeaseClient = TypeVar("BlobLeaseClient")
-
-
-async def upload_block_blob(  # pylint: disable=too-many-locals
-        client=None,
-        data=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if not overwrite and not _any_conditions(**kwargs):
-            kwargs['modified_access_conditions'].if_none_match = '*'
-        adjusted_count = length
-        if (encryption_options.get('key') is not None) and (adjusted_count is not None):
-            adjusted_count += (16 - (length % 16))
-        blob_headers = kwargs.pop('blob_headers', None)
-        tier = kwargs.pop('standard_blob_tier', None)
-
-        # Do single put if the size is smaller than config.max_single_put_size
-        if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size):
-            try:
-                data = data.read(length)
-                if not isinstance(data, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-            except AttributeError:
-                pass
-            if encryption_options.get('key'):
-                encryption_data, data = encrypt_blob(data, encryption_options['key'])
-                headers['x-ms-meta-encryptiondata'] = encryption_data
-            return await client.upload(
-                data,
-                content_length=adjusted_count,
-                blob_http_headers=blob_headers,
-                headers=headers,
-                cls=return_response_headers,
-                validate_content=validate_content,
-                data_stream_total=adjusted_count,
-                upload_stream_current=0,
-                tier=tier.value if tier else None,
-                **kwargs)
-
-        use_original_upload_path = blob_settings.use_byte_buffer or \
-            validate_content or encryption_options.get('required') or \
-            blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
-            hasattr(stream, 'seekable') and not stream.seekable() or \
-            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
-
-        if use_original_upload_path:
-            if encryption_options.get('key'):
-                cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key'])
-                headers['x-ms-meta-encryptiondata'] = encryption_data
-                encryption_options['cek'] = cek
-                encryption_options['vector'] = iv
-            block_ids = await upload_data_chunks(
-                service=client,
-                uploader_class=BlockBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                max_concurrency=max_concurrency,
-                stream=stream,
-                validate_content=validate_content,
-                encryption_options=encryption_options,
-                **kwargs
-            )
-        else:
-            block_ids = await upload_substream_blocks(
-                service=client,
-                uploader_class=BlockBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                max_concurrency=max_concurrency,
-                stream=stream,
-                validate_content=validate_content,
-                **kwargs
-            )
-
-        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
-        block_lookup.latest = block_ids
-        return await client.commit_block_list(
-            block_lookup,
-            blob_http_headers=blob_headers,
-            cls=return_response_headers,
-            validate_content=validate_content,
-            headers=headers,
-            tier=tier.value if tier else None,
-            **kwargs)
-    except StorageErrorException as error:
-        try:
-            process_storage_error(error)
-        except ResourceModifiedError as mod_error:
-            if not overwrite:
-                _convert_mod_error(mod_error)
-            raise
-
-
-async def upload_page_blob(
-        client=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if not overwrite and not _any_conditions(**kwargs):
-            kwargs['modified_access_conditions'].if_none_match = '*'
-        if length is None or length < 0:
-            raise ValueError("A content length must be specified for a Page Blob.")
-        if length % 512 != 0:
-            raise ValueError("Invalid page blob size: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(length))
-        if kwargs.get('premium_page_blob_tier'):
-            premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
-            try:
-                headers['x-ms-access-tier'] = premium_page_blob_tier.value
-            except AttributeError:
-                headers['x-ms-access-tier'] = premium_page_blob_tier
-        if encryption_options and encryption_options.get('data'):
-            headers['x-ms-meta-encryptiondata'] = encryption_options['data']
-        response = await client.create(
-            content_length=0,
-            blob_content_length=length,
-            blob_sequence_number=None,
-            blob_http_headers=kwargs.pop('blob_headers', None),
-            cls=return_response_headers,
-            headers=headers,
-            **kwargs)
-        if length == 0:
-            return response
-
-        kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
-        return await upload_data_chunks(
-            service=client,
-            uploader_class=PageBlobChunkUploader,
-            total_size=length,
-            chunk_size=blob_settings.max_page_size,
-            stream=stream,
-            max_concurrency=max_concurrency,
-            validate_content=validate_content,
-            encryption_options=encryption_options,
-            **kwargs)
-
-    except StorageErrorException as error:
-        try:
-            process_storage_error(error)
-        except ResourceModifiedError as mod_error:
-            if not overwrite:
-                _convert_mod_error(mod_error)
-            raise
-
-
-async def upload_append_blob(  # pylint: disable=unused-argument
-        client=None,
-        stream=None,
-        length=None,
-        overwrite=None,
-        headers=None,
-        validate_content=None,
-        max_concurrency=None,
-        blob_settings=None,
-        encryption_options=None,
-        **kwargs):
-    try:
-        if length == 0:
-            return {}
-        blob_headers = kwargs.pop('blob_headers', None)
-        append_conditions = AppendPositionAccessConditions(
-            max_size=kwargs.pop('maxsize_condition', None),
-            append_position=None)
-        try:
-            if overwrite:
-                await client.create(
-                    content_length=0,
-                    blob_http_headers=blob_headers,
-                    headers=headers,
-                    **kwargs)
-            return await upload_data_chunks(
-                service=client,
-                uploader_class=AppendBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                stream=stream,
-                max_concurrency=max_concurrency,
-                validate_content=validate_content,
-                append_position_access_conditions=append_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            if error.response.status_code != 404:
-                raise
-            # rewind the request body if it is a stream
-            if hasattr(stream, 'read'):
-                try:
-                    # attempt to rewind the body to the initial position
-                    stream.seek(0, SEEK_SET)
-                except UnsupportedOperation:
-                    # if body is not seekable, then retry would not work
-                    raise error
-            await client.create(
-                content_length=0,
-                blob_http_headers=blob_headers,
-                headers=headers,
-                **kwargs)
-            return await upload_data_chunks(
-                service=client,
-                uploader_class=AppendBlobChunkUploader,
-                total_size=length,
-                chunk_size=blob_settings.max_block_size,
-                stream=stream,
-                max_concurrency=max_concurrency,
-                validate_content=validate_content,
-                append_position_access_conditions=append_conditions,
-                **kwargs)
-    except StorageErrorException as error:
-        process_storage_error(error)
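Two invariants in the deleted helpers are worth noting: block blobs switch from a single `upload` call to chunked uploads committed via `commit_block_list` once the adjusted size reaches `max_single_put_size`, and page blobs must be sized on 512-byte boundaries. A standalone sketch of the page-size guard, mirroring the check in the removed `upload_page_blob` (pure illustration, not SDK code):

    PAGE_BOUNDARY = 512

    def validate_page_blob_length(length):
        # Mirrors the guard in the removed upload_page_blob helper: the size
        # must be known, non-negative, and aligned to a 512-byte boundary.
        if length is None or length < 0:
            raise ValueError("A content length must be specified for a Page Blob.")
        if length % PAGE_BOUNDARY != 0:
            raise ValueError(
                "Invalid page blob size: {0}. The size must be aligned to a "
                "512-byte boundary.".format(length))

    validate_page_blob_length(4096)    # OK: exactly 8 pages
    try:
        validate_page_blob_length(4000)
    except ValueError as e:
        print(e)                       # rejected: not 512-aligned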
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/__init__.py	2025-06-18 05:27:42.000000000 +0000
@@ -1 +1 @@
-﻿__import__('pkg_resources').declare_namespace(__name__)
+﻿
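The hunk above replaces the setuptools-style namespace declaration with an effectively empty `__init__.py`, dropping the import-time dependency on `pkg_resources`, which setuptools has deprecated. For context, the idiom being deleted and its modern alternatives, shown as `__init__.py` contents:

    # Removed in 1.5.0: setuptools namespace declaration; it pulls in
    # pkg_resources at import time and is deprecated upstream.
    __import__('pkg_resources').declare_namespace(__name__)

    # Alternatives: keep __init__.py empty (a regular package, as done here),
    # or remove __init__.py entirely for a PEP 420 implicit namespace package.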
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,67 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._data_lake_file_client import DataLakeFileClient
-from ._data_lake_directory_client import DataLakeDirectoryClient
-from ._file_system_client import FileSystemClient
-from ._data_lake_service_client import DataLakeServiceClient
-from ._data_lake_lease import DataLakeLeaseClient
-from ._models import (
-    LocationMode,
-    ResourceTypes,
-    FileSystemProperties,
-    FileSystemPropertiesPaged,
-    DirectoryProperties,
-    PathProperties,
-    PathPropertiesPaged,
-    LeaseProperties,
-    ContentSettings,
-    AccountSasPermissions,
-    FileSystemSasPermissions,
-    DirectorySasPermissions,
-    FileSasPermissions,
-    UserDelegationKey,
-    PublicAccess
-)
-from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \
-    generate_file_sas
-
-from ._shared.policies import ExponentialRetry, LinearRetry
-from ._shared.models import StorageErrorCode
-from ._version import VERSION
-
-__version__ = VERSION
-
-__all__ = [
-    'DataLakeServiceClient',
-    'FileSystemClient',
-    'DataLakeFileClient',
-    'DataLakeDirectoryClient',
-    'DataLakeLeaseClient',
-    'ExponentialRetry',
-    'LinearRetry',
-    'LocationMode',
-    'PublicAccess',
-    'ResourceTypes',
-    'StorageErrorCode',
-    'UserDelegationKey',
-    'FileSystemProperties',
-    'FileSystemPropertiesPaged',
-    'DirectoryProperties',
-    'PathProperties',
-    'PathPropertiesPaged',
-    'LeaseProperties',
-    'ContentSettings',
-    'AccountSasPermissions',
-    'FileSystemSasPermissions',
-    'DirectorySasPermissions',
-    'FileSasPermissions',
-    'generate_account_sas',
-    'generate_file_system_sas',
-    'generate_directory_sas',
-    'generate_file_sas',
-    'VERSION',
-]
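Because every API version lives in its own subpackage, deleting `v2019_07_07` is a hard break for pinned imports: callers must move to a version directory this release retains. A migration sketch (the retained `v2021_08_06` directory is an assumption based on this package line's layout; the endpoint and credential are placeholders):

    # Pinned import removed by this release:
    # from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    # Equivalent import against a retained version directory:
    from azure.multiapi.storagev2.filedatalake.v2021_08_06 import DataLakeServiceClient

    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net",  # placeholder
        credential="<account-key-or-sas>")                     # placeholder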
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,528 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._shared.base_client import parse_connection_str
-from ._data_lake_file_client import DataLakeFileClient
-from ._models import DirectoryProperties
-from ._path_client import PathClient
-
-
-class DataLakeDirectoryClient(PathClient):
-    """A client to interact with the DataLake directory, even if the directory may not yet exist.
-
-    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
-    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param directory_name:
-        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}.
-    :type directory_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-    def __init__(
-        self, account_url,  # type: str
-        file_system_name,  # type: str
-        directory_name,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
-                                                      credential=credential, **kwargs)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            file_system_name,  # type: str
-            directory_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> DataLakeDirectoryClient
-        """
-        Create DataLakeDirectoryClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param file_system_name: The name of file system to interact with.
-        :type file_system_name: str
-        :param directory_name: The name of directory to interact with. The directory is under file system.
-        :type directory_name: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :return: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, file_system_name=file_system_name, directory_name=directory_name,
-            credential=credential, **kwargs)
-
-    def create_directory(self, content_settings=None,  # type: Optional[ContentSettings]
-                         metadata=None,  # type: Optional[Dict[str, str]]
-                         **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create a new directory.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict (Etag and last modified).
-        """
-        return self._create('directory', content_settings=content_settings, metadata=metadata, **kwargs)
-
-    def delete_directory(self, **kwargs):
-        # type: (...) -> None
-        """
-        Marks the specified directory for deletion.
-
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        return self._delete(**kwargs)
-
-    def get_directory_properties(self, **kwargs):
-        # type: (**Any) -> DirectoryProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the directory. It does not return the content of the directory.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: DirectoryProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        blob_properties = self._get_path_properties(**kwargs)
-        return DirectoryProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
-
-    def rename_directory(self, rename_destination, **kwargs):
-        # type: (str, **Any) -> DataLakeDirectoryClient
-        """
-        Rename the source directory.
-
-        :param str rename_destination: the new directory name the user wants to rename to.
-            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
-        :keyword source_lease: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :paramtype permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        rename_destination = rename_destination.strip('/')
-        new_file_system = rename_destination.split('/')[0]
-        path = rename_destination[len(new_file_system):]
-
-        new_directory_client = DataLakeDirectoryClient(
-            self.url, new_file_system, directory_name=path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-        new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name,  # pylint: disable=protected-access
-                                          **kwargs)
-        return new_directory_client
-
-    def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
-                             content_settings=None,  # type: Optional[ContentSettings]
-                             metadata=None,  # type: Optional[Dict[str, str]]
-                             **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Create a subdirectory and return the subdirectory client to be interacted with.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient for the subdirectory.
-        """
-        subdir = self.get_sub_directory_client(sub_directory)
-        subdir.create_directory(content_settings=content_settings, metadata=metadata, **kwargs)
-        return subdir
-
-    def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
-                             **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Marks the specified subdirectory for deletion.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient for the subdirectory
-        """
-        subdir = self.get_sub_directory_client(sub_directory)
-        subdir.delete_directory(**kwargs)
-        return subdir
-
-    def create_file(self, file,  # type: Union[FileProperties, str]
-                    **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Create a new file and return the file client to be interacted with.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword metadata:
-            Name-value pairs associated with the blob as metadata.
-        :paramtype metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        file_client.create_file(**kwargs)
-        return file_client
-
-    def get_file_client(self, file  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties, e.g. directory/subdirectory/file.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_client]
-                :end-before: [END bsc_get_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file.name
-        except AttributeError:
-            file_path = self.path_name + '/' + file
-
-        return DataLakeFileClient(
-            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-
-    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
-                                 ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified subdirectory of the current directory.
-
-        The subdirectory need not already exist.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_directory_client]
-                :end-before: [END bsc_get_directory_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        try:
-            subdir_path = sub_directory.name
-        except AttributeError:
-            subdir_path = self.path_name + '/' + sub_directory
-
-        return DataLakeDirectoryClient(
-            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
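Taken together, these accessors let callers walk the namespace without any service round trips; a short sketch, reusing the placeholder setup above:

    sub_client = directory_client.get_sub_directory_client("2020")   # my-dir/2020
    nested = sub_client.get_sub_directory_client("01")               # my-dir/2020/01
    file_client = nested.get_file_client("data.csv")                 # my-dir/2020/01/data.csv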
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,509 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import six
-
-from ._shared.base_client import parse_connection_str
-from ._shared.request_handlers import get_length, read_length
-from ._shared.response_handlers import return_response_headers
-from ._generated.models import StorageErrorException
-from ._path_client import PathClient
-from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions
-from ._deserialize import process_storage_error
-from ._models import FileProperties
-
-
-class DataLakeFileClient(PathClient):
-    """A client to interact with the DataLake file, even if the file may not yet exist.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param file_path:
-        The whole file path, used to interact with a specific file.
-        eg. "{directory}/{subdirectory}/{file}"
-    :type file_path: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-    def __init__(
-        self, account_url,  # type: str
-        file_system_name,  # type: str
-        file_path,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
-                                                 credential=credential, **kwargs)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            file_system_name,  # type: str
-            file_path,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> DataLakeFileClient
-        """
-        Create DataLakeFileClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param file_system_name: The name of file system to interact with.
-        :type file_system_name: str
-        :param file_path: The whole path of the file to interact with, under the given
-            file system. eg. "{directory}/{subdirectory}/{file}"
-        :type file_path: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :return: a DataLakeFileClient
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, file_system_name=file_system_name, file_path=file_path,
-            credential=credential, **kwargs)
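A minimal sketch of this factory; the connection string below is a placeholder:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeFileClient

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<key>;EndpointSuffix=core.windows.net")
    file_client = DataLakeFileClient.from_connection_string(
        conn_str, file_system_name="my-file-system", file_path="my-dir/report.txt")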
-
-    def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
-                    metadata=None,  # type: Optional[Dict[str, str]]
-                    **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create a new file.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict (Etag and last modified).
-        """
-        return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
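A hedged example of the optional settings; ContentSettings is assumed to be exported by the package root, and the metadata and permission values are illustrative:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import ContentSettings

    response = file_client.create_file(
        content_settings=ContentSettings(content_type="text/plain"),
        metadata={"category": "report"},
        permissions="rw-r-----",  # only honored on HNS-enabled accounts
        umask="0027")
    print(response["etag"], response["last_modified"])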
-
-    def delete_file(self, **kwargs):
-        # type: (...) -> None
-        """
-        Marks the specified file for deletion.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        return self._delete(**kwargs)
-
-    def get_file_properties(self, **kwargs):
-        # type: (**Any) -> FileProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. It does not return the content of the file.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        blob_properties = self._get_path_properties(**kwargs)
-        return FileProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
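Usage sketch; the attributes shown mirror FileProperties fields populated from the underlying blob properties:

    props = file_client.get_file_properties()
    print(props.name, props.last_modified, props.metadata)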
-
-    @staticmethod
-    def _append_data_options(data, offset, length=None, **kwargs):
-        # type: (Union[AnyStr, Iterable[AnyStr], IO[AnyStr]], int, Optional[int], **Any) -> Dict[str, Any]
-
-        if isinstance(data, six.text_type):
-            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
-        if length is None:
-            length = get_length(data)
-            if length is None:
-                length, data = read_length(data)
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        options = {
-            'body': data,
-            'position': offset,
-            'content_length': length,
-            'lease_access_conditions': access_conditions,
-            'validate_content': kwargs.pop('validate_content', False),
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def append_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
-                    offset,  # type: int
-                    length=None,  # type: Optional[int]
-                    **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """Append data to the file.
-
-        :param data: Content to be appended to file
-        :param offset: start position at which the data is to be appended.
-        :param length: Size of the data in bytes.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            file.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :return: dict of the response header
-        """
-        options = self._append_data_options(
-            data,
-            offset,
-            length=length,
-            **kwargs)
-        try:
-            return self._client.path.append_data(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
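A sketch of staging data; appended bytes remain uncommitted until flush_data is called (see the flush sketch further below):

    data = b"hello, datalake"
    file_client.append_data(data, offset=0, length=len(data))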
-
-    @staticmethod
-    def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
-        # type: (int, Optional[ContentSettings], bool, **Any) -> Dict[str, Any]
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-
-        path_http_headers = None
-        if content_settings:
-            path_http_headers = get_path_http_headers(content_settings)
-
-        options = {
-            'position': offset,
-            'content_length': 0,
-            'path_http_headers': path_http_headers,
-            'retain_uncommitted_data': retain_uncommitted_data,
-            'close': kwargs.pop('close', False),
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def flush_data(self, offset,  # type: int
-                   retain_uncommitted_data=False,   # type: Optional[bool]
-                   **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """ Commit the previous appended data.
-
-        :param offset: the length of the file after committing the
-            previously appended data.
-        :param bool retain_uncommitted_data: Valid only for flush operations.  If
-            "true", uncommitted data is retained after the flush operation
-            completes; otherwise, the uncommitted data is deleted after the flush
-            operation.  The default is false.  Data at offsets less than the
-            specified position are written to the file when flush succeeds, but
-            this optional parameter allows data after the flush position to be
-            retained for a future flush operation.
-        :keyword bool close: Azure Storage Events allow applications to receive
-            notifications when files change. When Azure Storage Events are
-            enabled, a file changed event is raised. This event has a property
-            indicating whether this is the final change to distinguish the
-            difference between an intermediate flush to a file stream and the
-            final close of a file stream. The close query parameter is valid only
-            when the action is "flush" and change notifications are enabled. If
-            the value of close is "true" and the flush operation completes
-            successfully, the service raises a file change notification with a
-            property indicating that this is the final update (the file stream has
-            been closed). If "false" a change notification is raised indicating
-            the file has changed. The default is false. This query parameter is
-            set to true by the Hadoop ABFS driver to indicate that the file stream
-            has been closed.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :return: response header in dict
-        """
-        options = self._flush_data_options(
-            offset,
-            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
-        try:
-            return self._client.path.flush_data(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
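Continuing the append sketch above, committing the staged bytes; the offset must equal the resulting file length:

    file_client.flush_data(offset=len(data))
    # or, to raise a final "file closed" change notification:
    # file_client.flush_data(offset=len(data), close=True)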
-
-    def read_file(self, offset=None,   # type: Optional[int]
-                  length=None,   # type: Optional[int]
-                  stream=None,  # type: Optional[IO]
-                  **kwargs):
-        # type: (...) -> Union[bytes, int]
-        """Download a file from the service. Return the downloaded data in bytes or
-        write the downloaded data into user provided stream and return the written size.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the file.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param IO stream:
-            User provided stream to write the downloaded data into.
-        :keyword lease:
-            If specified, the download only succeeds if the file's lease is active
-            and matches this ID. Required if the file has an active lease.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: downloaded data or the size of data written into the provided stream
-        :rtype: bytes or int
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_hello_world.py
-                :start-after: [START download_a_blob]
-                :end-before: [END download_a_blob]
-                :language: python
-                :dedent: 12
-                :caption: Download a blob.
-        """
-        downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
-        if stream:
-            return downloader.readinto(stream)
-        return downloader.readall()
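A sketch of both read modes:

    import io

    content = file_client.read_file()            # whole file as bytes
    buf = io.BytesIO()
    written = file_client.read_file(stream=buf)  # byte count written into buf (int)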
-
-    def rename_file(self, rename_destination, **kwargs):
-        # type: (**Any) -> DataLakeFileClient
-        """
-        Rename the source file.
-
-        :param str rename_destination: the new path to rename the source file to.
-            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
-        :keyword source_lease: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: the renamed file client.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-        """
-        rename_destination = rename_destination.strip('/')
-        new_file_system = rename_destination.split('/')[0]
-        path = rename_destination[len(new_file_system):]
-
-        new_file_client = DataLakeFileClient(
-            self.url, new_file_system, file_path=path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-        new_file_client._rename_path('/' + self.file_system_name + '/' + self.path_name,  # pylint: disable=protected-access
-                                     **kwargs)
-        return new_file_client
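Usage sketch; note the destination must be prefixed with its file system name:

    renamed = file_client.rename_file("my-file-system/my-dir/report-2020.txt")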
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,245 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import uuid
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any,
-    TypeVar, TYPE_CHECKING
-)
-from azure.storage.blob import BlobLeaseClient
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    FileSystemClient = TypeVar("FileSystemClient")
-    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
-    DataLakeFileClient = TypeVar("DataLakeFileClient")
-
-
-class DataLakeLeaseClient(object):
-    """Creates a new DataLakeLeaseClient.
-
-    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the file system, directory, or file to lease.
-    :type client: ~azure.storage.filedatalake.FileSystemClient or
-        ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-    def __init__(
-            self, client, lease_id=None
-    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
-        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
-        self.id = lease_id or str(uuid.uuid4())
-        self.last_modified = None
-        self.etag = None
-
-        if hasattr(client, '_blob_client'):
-            _client = client._blob_client  # type: ignore # pylint: disable=protected-access
-        elif hasattr(client, '_container_client'):
-            _client = client._container_client  # type: ignore # pylint: disable=protected-access
-        else:
-            raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
-
-        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        self.release()
-
-    def acquire(self, lease_duration=-1, **kwargs):
-        # type: (int, **Any) -> None
-        """Requests a new lease.
-
-        If the file or file system does not have an active lease, the DataLake service
-        creates a lease on it and returns a new lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
-        self._update_lease_client_attributes()
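A sketch of a lease-guarded write using the context-manager support above (__exit__ releases the lease); DataLakeLeaseClient is assumed to be exported by the package root, and the sketch assumes the file was just created:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeLeaseClient

    with DataLakeLeaseClient(file_client) as lease:
        lease.acquire(lease_duration=15)
        data = b"guarded write"
        file_client.append_data(data, offset=0, length=len(data), lease=lease)
        file_client.flush_data(offset=len(data), lease=lease)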
-
-    def renew(self, **kwargs):
-        # type: (Any) -> None
-        """Renews the lease.
-
-        The lease can be renewed if the lease ID specified in the
-        lease client matches that associated with the container or blob. Note that
-        the lease may be renewed even if it has expired as long as the container
-        or blob has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        self._blob_lease_client.renew(**kwargs)
-        self._update_lease_client_attributes()
-
-    def release(self, **kwargs):
-        # type: (Any) -> None
-        """Release the lease.
-
-        The lease may be released if the client lease id specified matches
-        that associated with the container or blob. Releasing the lease allows another client
-        to immediately acquire the lease for the container or blob as soon as the release is complete.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        self._blob_lease_client.release(**kwargs)
-        self._update_lease_client_attributes()
-
-    def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """Change the lease ID of an active lease.
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
-        self._update_lease_client_attributes()
-
-    def break_lease(self, lease_break_period=None, **kwargs):
-        # type: (Optional[int], Any) -> int
-        """Break the lease, if the container or blob has an active lease.
-
-        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container or blob.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :param int lease_break_period:
-            This is the proposed duration of seconds that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
-
-    def _update_lease_client_attributes(self):
-        self.id = self._blob_lease_client.id  # type: str
-        self.last_modified = self._blob_lease_client.last_modified  # type: datetime
-        self.etag = self._blob_lease_client.etag  # type: str
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,373 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse  # type: ignore
-
-from azure.core.paging import ItemPaged
-
-from azure.storage.blob import BlobServiceClient
-from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str
-from ._file_system_client import FileSystemClient
-from ._data_lake_directory_client import DataLakeDirectoryClient
-from ._data_lake_file_client import DataLakeFileClient
-from ._models import UserDelegationKey, FileSystemPropertiesPaged
-from ._serialize import convert_dfs_url_to_blob_url
-
-
-class DataLakeServiceClient(StorageAccountHostsMixin):
-    """A client to interact with the DataLake Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete file systems within the account.
-    For operations relating to a specific file system, directory or file, clients for those entities
-    can also be retrieved using the `get_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the datalake service endpoint. This could be either the
-        primary endpoint, or the secondary endpoint depending on the current `location_mode`.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URL to the DataLake storage account. Any other entities included
-        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        blob_account_url = convert_dfs_url_to_blob_url(account_url)
-        self._blob_account_url = blob_account_url
-        self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
-
-        _, sas_token = parse_query(parsed_url.query)
-        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
-
-        super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
-                                                    credential=self._raw_credential, **kwargs)
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        formatted_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str)
-        return formatted_url
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> DataLakeServiceClient
-        """
-        Create DataLakeServiceClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :return: a DataLakeServiceClient
-        :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(account_url, credential=credential, **kwargs)
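Sketch of constructing the service client from a placeholder connection string, then scoping down to a file system:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    service_client = DataLakeServiceClient.from_connection_string(conn_str)  # conn_str as above
    fs_client = service_client.get_file_system_client("my-file-system")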
-
-    def get_user_delegation_key(self, key_start_time,  # type: datetime
-                                key_expiry_time,  # type: datetime
-                                **kwargs  # type: Any
-                                ):
-        # type: (...) -> UserDelegationKey
-        """
-        Obtain a user delegation key for the purpose of signing SAS tokens.
-        A token credential must be present on the service object for this request to succeed.
-
-        :param ~datetime.datetime key_start_time:
-            A DateTime value. Indicates when the key becomes valid.
-        :param ~datetime.datetime key_expiry_time:
-            A DateTime value. Indicates when the key stops being valid.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The user delegation key.
-        :rtype: ~azure.storage.filedatalake.UserDelegationKey
-        """
-        delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
-                                                                           key_expiry_time=key_expiry_time,
-                                                                           **kwargs)  # pylint: disable=protected-access
-        delegation_key.__class__ = UserDelegationKey  # pylint: disable=protected-access
-        return delegation_key
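Usage sketch; this call only succeeds when the client was built with a token credential (e.g. one from azure.identity):

    from datetime import datetime, timedelta

    key = service_client.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=1))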
-
-    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
-                          include_metadata=None,  # type: Optional[bool]
-                          **kwargs):
-        # type: (...) -> ItemPaged[FileSystemProperties]
-        """Returns a generator to list the file systems under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all file systems have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only file systems whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that file system metadata be returned in the response.
-            The default value is `False`.
-        :keyword int results_per_page:
-            The maximum number of file system names to retrieve per API
-            call. If the request does not specify a value, the server will return up to 5,000 items per page.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) of FileSystemProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START dsc_list_file_systems]
-                :end-before: [END dsc_list_file_systems]
-                :language: python
-                :dedent: 12
-                :caption: Listing the file systems in the datalake service.
-        """
-        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
-                                                               include_metadata=include_metadata,
-                                                               **kwargs)  # pylint: disable=protected-access
-        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
-        return item_paged
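Iteration sketch; paging is handled transparently by the returned ItemPaged:

    for fs in service_client.list_file_systems(name_starts_with="logs-",
                                               include_metadata=True):
        print(fs.name)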
-
-    def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
-                           metadata=None,  # type: Optional[Dict[str, str]]
-                           public_access=None,  # type: Optional[PublicAccess]
-                           **kwargs):
-        # type: (...) -> FileSystemClient
-        """Creates a new file system under the specified account.
-
-        If the file system with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a client with which to interact with the newly
-        created file system.
-
-        :param str file_system: The name of the file system to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            file system as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: file system, file.
-        :type public_access: ~azure.storage.filedatalake.PublicAccess
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START dsc_create_file_system]
-                :end-before: [END dsc_create_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Creating a file system in the datalake service.
-        """
-        file_system_client = self.get_file_system_client(file_system)
-        file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
-        return file_system_client
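Usage sketch with illustrative metadata:

    fs_client = service_client.create_file_system(
        "my-new-file-system", metadata={"category": "test"})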
-
-    def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
-                           **kwargs):
-        # type: (...) -> FileSystemClient
-        """Marks the specified file system for deletion.
-
-        The file system and any files contained within it are later deleted during garbage collection.
-        If the file system is not found, a ResourceNotFoundError will be raised.
-
-        :param file_system:
-            The file system to delete. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, delete_file_system only succeeds if the
-            file system's lease is active and matches this ID.
-            Required if the file system has an active lease.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A FileSystemClient for the deleted file system.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_delete_file_system]
-                :end-before: [END bsc_delete_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Deleting a file system in the datalake service.
-        """
-        file_system_client = self.get_file_system_client(file_system)
-        file_system_client.delete_file_system(**kwargs)
-        return file_system_client
-
-    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
-                               ):
-        # type: (...) -> FileSystemClient
-        """Get a client to interact with the specified file system.
-
-        The file system need not already exist.
-
-        :param file_system:
-            The file system. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :returns: A FileSystemClient.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_system_client]
-                :end-before: [END bsc_get_file_system_client]
-                :language: python
-                :dedent: 8
-                :caption: Getting the file system client to interact with a specific file system.
-        """
-        return FileSystemClient(self.url, file_system, credential=self._raw_credential, _configuration=self._config,
-                                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-                                require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-                                key_resolver_function=self.key_resolver_function)
-
-    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
-                             directory  # type: Union[DirectoryProperties, str]
-                             ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified directory.
-
-        The directory need not already exist.
-
-        :param file_system:
-            The file system that the directory is in. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_directory_client]
-                :end-before: [END bsc_get_directory_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        return DataLakeDirectoryClient(self.url, file_system, directory_name=directory,
-                                       credential=self._raw_credential,
-                                       _configuration=self._config, _pipeline=self._pipeline,
-                                       _location_mode=self._location_mode, _hosts=self._hosts,
-                                       require_encryption=self.require_encryption,
-                                       key_encryption_key=self.key_encryption_key,
-                                       key_resolver_function=self.key_resolver_function
-                                       )
-
-    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
-                        file_path  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file_system:
-            The file system that the file is in. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :param file_path:
-            The file with which to interact. This can either be the full path of the
-            file (from the root directory), e.g. directory/subdirectory/file, or an
-            instance of FileProperties.
-        :type file_path: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_client]
-                :end-before: [END bsc_get_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file_path.name
-        except AttributeError:
-            pass
-
-        return DataLakeFileClient(
-            self.url, file_system, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
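
For reference, a minimal sketch of how the removed v2019_07_07 DataLakeServiceClient
helpers above were typically chained (runnable against the 1.4.0 package only; the
endpoint and credential below are placeholders, not values taken from this package):

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    # Any azure.identity credential, account key, or SAS token string works here.
    service_client = DataLakeServiceClient(
        account_url="https://myaccount.dfs.core.windows.net", credential="<sas-token>")

    # create_file_system/delete_file_system return the FileSystemClient they act on.
    file_system_client = service_client.create_file_system(
        "myfilesystem", metadata={"Category": "test"})
    file_client = service_client.get_file_client("myfilesystem", "dir/subdir/data.txt")
    service_client.delete_file_system("myfilesystem")
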
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,110 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-from typing import (  # pylint: disable=unused-import
-    TYPE_CHECKING
-)
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \
-    ResourceNotFoundError, ResourceExistsError
-from ._shared.models import StorageErrorCode
-
-if TYPE_CHECKING:
-    pass
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = value
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers)
-
-
-def return_headers_and_deserialized_path_list(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers)
-
-
-def process_storage_error(storage_error):
-    raise_error = HttpResponseError
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        if error_body:
-            for info in error_body:
-                if info == 'code':
-                    error_code = error_body[info]
-                elif info == 'message':
-                    error_message = error_body[info]
-                else:
-                    additional_data[info] = error_body[info]
-    except DecodeError:
-        pass
-
-    try:
-        if error_code:
-            error_code = StorageErrorCode(error_code)
-            if error_code in [StorageErrorCode.condition_not_met]:
-                raise_error = ResourceModifiedError
-            if error_code in [StorageErrorCode.invalid_authentication_info,
-                              StorageErrorCode.authentication_failed]:
-                raise_error = ClientAuthenticationError
-            if error_code in [StorageErrorCode.resource_not_found,
-                              StorageErrorCode.invalid_property_name,
-                              StorageErrorCode.invalid_source_uri,
-                              StorageErrorCode.source_path_not_found,
-                              StorageErrorCode.lease_name_mismatch,
-                              StorageErrorCode.file_system_not_found,
-                              StorageErrorCode.path_not_found,
-                              StorageErrorCode.parent_not_found,
-                              StorageErrorCode.invalid_destination_path,
-                              StorageErrorCode.invalid_rename_source_path,
-                              StorageErrorCode.lease_is_already_broken,
-                              StorageErrorCode.invalid_source_or_destination_resource_type,
-                              StorageErrorCode.rename_destination_parent_path_not_found]:
-                raise_error = ResourceNotFoundError
-            if error_code in [StorageErrorCode.account_already_exists,
-                              StorageErrorCode.account_being_created,
-                              StorageErrorCode.resource_already_exists,
-                              StorageErrorCode.resource_type_mismatch,
-                              StorageErrorCode.source_path_is_being_deleted,
-                              StorageErrorCode.path_already_exists,
-                              StorageErrorCode.destination_path_is_being_deleted,
-                              StorageErrorCode.file_system_already_exists,
-                              StorageErrorCode.file_system_being_deleted,
-                              StorageErrorCode.path_conflict]:
-                raise_error = ResourceExistsError
-    except ValueError:
-        # Got an unknown error code
-        pass
-
-    try:
-        error_message += "\nErrorCode:{}".format(error_code.value)
-    except AttributeError:
-        error_message += "\nErrorCode:{}".format(error_code)
-    for name, info in additional_data.items():
-        error_message += "\n{}:{}".format(name, info)
-
-    error = raise_error(message=error_message, response=storage_error.response)
-    error.error_code = error_code
-    error.additional_info = additional_data
-    raise error
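
The helpers above are internal, but their behavior is small enough to pin down with a
sketch (runnable against the 1.4.0 package, using the module path as shipped there):

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._deserialize import normalize_headers

    # 'x-ms-' prefixes are stripped and the remaining names are snake-cased.
    print(normalize_headers({"x-ms-request-id": "abc123", "Last-Modified": "Mon, 01 Jan 2024"}))
    # {'request_id': 'abc123', 'last_modified': 'Mon, 01 Jan 2024'}

process_storage_error follows the same internal-only pattern: it reads the
x-ms-error-code header (and any decodable error body), narrows HttpResponseError to
ResourceModifiedError, ClientAuthenticationError, ResourceNotFoundError, or
ResourceExistsError based on the StorageErrorCode, and re-raises with the error code
appended to the message.
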
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,655 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import functools
-
-try:
-    from urllib.parse import urlparse, quote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-from azure.core.paging import ItemPaged
-from azure.storage.blob import ContainerClient
-from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str
-from ._serialize import convert_dfs_url_to_blob_url
-from ._models import LocationMode, FileSystemProperties, PathPropertiesPaged
-from ._data_lake_file_client import DataLakeFileClient
-from ._data_lake_directory_client import DataLakeDirectoryClient
-from ._data_lake_lease import DataLakeLeaseClient
-from ._generated import DataLakeStorageClient
-
-
-class FileSystemClient(StorageAccountHostsMixin):
-    """A client to interact with a specific file system, even if that file system
-    may not yet exist.
-
-    For operations relating to a specific directory or file within this file system, a directory client or file client
-    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_file_system_samples.py
-            :start-after: [START create_file_system_client_from_service]
-            :end-before: [END create_file_system_client_from_service]
-            :language: python
-            :dedent: 8
-            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
-
-        .. literalinclude:: ../samples/test_file_system_samples.py
-            :start-after: [START create_file_system_client_sasurl]
-            :end-before: [END create_file_system_client_sasurl]
-            :language: python
-            :dedent: 8
-            :caption: Creating the FileSystemClient client directly.
-    """
-    def __init__(
-        self, account_url,  # type: str
-        file_system_name,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not file_system_name:
-            raise ValueError("Please specify a file system name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        blob_account_url = convert_dfs_url_to_blob_url(account_url)
-        # TODO: add self.account_url to base_client and remove _blob_account_url
-        self._blob_account_url = blob_account_url
-
-        datalake_hosts = kwargs.pop('_hosts', None)
-        blob_hosts = None
-        if datalake_hosts:
-            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
-            blob_secondary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.SECONDARY])
-            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url,
-                          LocationMode.SECONDARY: blob_secondary_account_url}
-        self._container_client = ContainerClient(blob_account_url, file_system_name,
-                                                 credential=credential, _hosts=blob_hosts, **kwargs)
-
-        _, sas_token = parse_query(parsed_url.query)
-        self.file_system_name = file_system_name
-        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
-
-        super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
-                                               _hosts=datalake_hosts, **kwargs)
-        self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline)
-
-    def _format_url(self, hostname):
-        file_system_name = self.file_system_name
-        if isinstance(file_system_name, six.text_type):
-            file_system_name = file_system_name.encode('UTF-8')
-        return "{}://{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(file_system_name),
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            file_system_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> FileSystemClient
-        """
-        Create FileSystemClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param file_system_name: The name of file system to interact with.
-        :type file_system_name: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :return: A FileSystemClient.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, file_system_name=file_system_name, credential=credential, **kwargs)
-
-    def acquire_lease(
-        self, lease_duration=-1,  # type: int
-        lease_id=None,  # type: Optional[str]
-        **kwargs
-    ):
-        # type: (...) -> DataLakeLeaseClient
-        """
-        Requests a new lease. If the file system does not have an active lease,
-        the DataLake service creates a lease on the file system and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The DataLake service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
-        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START acquire_lease_on_file_system]
-                :end-before: [END acquire_lease_on_file_system]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on the file_system.
-        """
-        lease = DataLakeLeaseClient(self, lease_id=lease_id)
-        lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
-
-    def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
-                           public_access=None,  # type: Optional[PublicAccess]
-                           **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Creates a new file system under the specified account.
-
-        If a file system with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a property dict (Etag and last modified) for
-        the newly created file system.
-
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            file system as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: file system, file.
-        :type public_access: ~azure.storage.filedatalake.PublicAccess
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A file system-updated property dict (Etag and last modified).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START create_file_system]
-                :end-before: [END create_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Creating a file system in the datalake service.
-        """
-        return self._container_client.create_container(metadata=metadata,
-                                                       public_access=public_access,
-                                                       **kwargs)
-
-    def delete_file_system(self, **kwargs):
-        # type: (Any) -> None
-        """Marks the specified file system for deletion.
-
-        The file system and any files contained within it are later deleted during garbage collection.
-        If the file system is not found, a ResourceNotFoundError will be raised.
-
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, delete_file_system only succeeds if the
-            file system's lease is active and matches this ID.
-            Required if the file system has an active lease.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START delete_file_system]
-                :end-before: [END delete_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Deleting a file system in the datalake service.
-        """
-        self._container_client.delete_container(**kwargs)
-
-    def get_file_system_properties(self, **kwargs):
-        # type: (Any) -> FileSystemProperties
-        """Returns all user-defined metadata and system properties for the specified
-        file system. The data returned does not include the file system's list of paths.
-
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, get_file_system_properties only succeeds if the
-            file system's lease is active and matches this ID.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Properties for the specified file system within a file system object.
-        :rtype: ~azure.storage.filedatalake.FileSystemProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_file_system_properties]
-                :end-before: [END get_file_system_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting properties on the file system.
-        """
-        container_properties = self._container_client.get_container_properties(**kwargs)
-        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
-
-    def set_file_system_metadata(  # type: ignore
-        self, metadata=None,  # type: Optional[Dict[str, str]]
-        **kwargs
-    ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        file system. Each call to this operation replaces all existing metadata
-        attached to the file system. To remove all metadata from the file system,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the file system as
-            metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, set_file_system_metadata only succeeds if the
-            file system's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: file system-updated property dict (Etag and last modified).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START set_file_system_metadata]
-                :end-before: [END set_file_system_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Setting metadata on the file system.
-        """
-        return self._container_client.set_container_metadata(metadata=metadata, **kwargs)
-
-    def get_paths(self, path=None, # type: Optional[str]
-                  recursive=True,  # type: Optional[bool]
-                  max_results=None,  # type: Optional[int]
-                  **kwargs):
-        # type: (...) -> ItemPaged[PathProperties]
-        """Returns a generator to list the paths(could be files or directories) under the specified file system.
-        The generator will lazily follow the continuation tokens returned by
-        the service.
-
-        :param str path:
-            Filters the results to return only paths under the specified path.
-        :param bool recursive:
-            Optional. Set True to list paths recursively. Defaults to True.
-        :param int max_results: An optional value that specifies the maximum
-            number of items to return per page. If omitted or greater than 5,000, the
-            response will include up to 5,000 items per page.
-        :keyword upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of PathProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_containers.py
-                :start-after: [START list_blobs_in_container]
-                :end-before: [END list_blobs_in_container]
-                :language: python
-                :dedent: 8
-                :caption: List the blobs in the container.
-        """
-        timeout = kwargs.pop('timeout', None)
-        command = functools.partial(
-            self._client.file_system.list_paths,
-            path=path,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, recursive, path=path, max_results=max_results,
-            page_iterator_class=PathPropertiesPaged, **kwargs)
-
-    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
-                         content_settings=None,  # type: Optional[ContentSettings]
-                         metadata=None,  # type: Optional[Dict[str, str]]
-                         **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Create a directory.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        directory_client = self.get_directory_client(directory)
-        directory_client.create_directory(content_settings=content_settings, metadata=metadata, **kwargs)
-        return directory_client
-
-    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
-                         **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Marks the specified path for deletion.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :keyword lease:
-            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        directory_client = self.get_directory_client(directory)
-        directory_client.delete_directory(**kwargs)
-        return directory_client
-
-    def create_file(self, file,  # type: Union[FileProperties, str]
-                    **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Create a file.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        file_client.create_file(**kwargs)
-        return file_client
-
-    def delete_file(self, file,  # type: Union[FileProperties, str]
-                    lease=None,  # type: Optional[Union[DataLakeLeaseClient, str]]
-                    **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Marks the specified file for deletion.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :param lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        file_client.delete_file(lease=lease, **kwargs)
-        return file_client
-
-    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
-                             ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified directory.
-
-        The directory need not already exist.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_directory_client_from_file_system]
-                :end-before: [END get_directory_client_from_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory,
-                                       credential=self._raw_credential,
-                                       _configuration=self._config, _pipeline=self._pipeline,
-                                       _location_mode=self._location_mode, _hosts=self._hosts,
-                                       require_encryption=self.require_encryption,
-                                       key_encryption_key=self.key_encryption_key,
-                                       key_resolver_function=self.key_resolver_function
-                                       )
-
-    def get_file_client(self, file_path  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file_path:
-            The file with which to interact. This can either be the path of the file
-            (from the root directory), e.g. directory/subdirectory/file, or an
-            instance of FileProperties.
-        :type file_path: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_file_client_from_file_system]
-                :end-before: [END get_file_client_from_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file_path.name
-        except AttributeError:
-            pass
-
-        return DataLakeFileClient(
-            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
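
A comparable sketch for the removed FileSystemClient (again 1.4.0-only; the connection
string below is a placeholder):

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import FileSystemClient

    conn_str = "DefaultEndpointsProtocol=https;AccountName=<name>;AccountKey=<key>;EndpointSuffix=core.windows.net"
    fs_client = FileSystemClient.from_connection_string(conn_str, file_system_name="myfilesystem")

    # Unlike the service-client variant, create_file_system here returns the
    # response-header dict (Etag and last modified), not a client.
    props = fs_client.create_file_system(metadata={"Category": "test"})

    # get_paths pages lazily through the service's continuation tokens.
    for path in fs_client.get_paths(path="mydir", recursive=True):
        print(path.name)
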
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._data_lake_storage_client import DataLakeStorageClient
-__all__ = ['DataLakeStorageClient']
-
-from .version import VERSION
-
-__version__ = VERSION
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,64 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from .version import VERSION
-
-
-class DataLakeStorageClientConfiguration(Configuration):
-    """Configuration for DataLakeStorageClient
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :param file_system: The filesystem identifier.
-    :type file_system: str
-    :param path1: The file or directory path.
-    :type path1: str
-    :ivar resource: The value must be "filesystem" for all filesystem
-     operations.
-    :vartype resource: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :vartype version: str
-    """
-
-    def __init__(self, url, file_system, path1, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-        if file_system is None:
-            raise ValueError("Parameter 'file_system' must not be None.")
-
-        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
-        self.generate_client_request_id = True
-
-        self.url = url
-        self.file_system = file_system
-        self.path1 = path1
-        self.resource = "filesystem"
-        self.version = "2019-02-02"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
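
The _configure method above is the azure-core policy-injection idiom: every pipeline
policy slot can be supplied through kwargs, and anything omitted falls back to a
default built from those same kwargs. A sketch (1.4.0-only internal import path; the
retry settings are illustrative):

    from azure.core.pipeline import policies
    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated._configuration import (
        DataLakeStorageClientConfiguration,
    )

    # Override only the retry policy; user-agent, headers, proxy, logging,
    # custom-hook, and redirect policies are created with their defaults.
    config = DataLakeStorageClientConfiguration(
        "https://myaccount.dfs.core.windows.net", "myfilesystem", None,
        retry_policy=policies.RetryPolicy(retry_total=3))
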
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,65 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import PipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration import DataLakeStorageClientConfiguration
-from azure.core.exceptions import map_error
-from .operations import ServiceOperations
-from .operations import FileSystemOperations
-from .operations import PathOperations
-from . import models
-
-
-class DataLakeStorageClient(object):
-    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.file.datalake.operations.ServiceOperations
-    :ivar file_system: FileSystem operations
-    :vartype file_system: azure.storage.file.datalake.operations.FileSystemOperations
-    :ivar path: Path operations
-    :vartype path: azure.storage.file.datalake.operations.PathOperations
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :param file_system: The filesystem identifier.
-    :type file_system: str
-    :param path1: The file or directory path.
-    :type path1: str
-    """
-
-    def __init__(self, url, file_system, path1, **kwargs):
-
-        base_url = '{url}'
-        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
-        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-11-09'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.file_system = FileSystemOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.path = PathOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-    def __exit__(self, *exc_details):
-        self._client.__exit__(*exc_details)
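
Since __enter__/__exit__ simply delegate to the underlying PipelineClient, the
generated client can be used as a context manager so the transport is closed on exit.
A sketch (1.4.0-only import path; the list_paths call and an unauthenticated request
are illustrative and would be rejected by the service without a credential):

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import DataLakeStorageClient

    with DataLakeStorageClient("https://myaccount.dfs.core.windows.net", "myfilesystem", None) as client:
        # Operation groups are attached as attributes by the constructor.
        paths = client.file_system.list_paths(recursive=False)
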
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._data_lake_storage_client_async import DataLakeStorageClient
-__all__ = ['DataLakeStorageClient']
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,65 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from ..version import VERSION
-
-
-class DataLakeStorageClientConfiguration(Configuration):
-    """Configuration for DataLakeStorageClient
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :param file_system: The filesystem identifier.
-    :type file_system: str
-    :param path1: The file or directory path.
-    :type path1: str
-    :ivar resource: The value must be "filesystem" for all filesystem
-     operations.
-    :vartype resource: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :vartype version: str
-    """
-
-    def __init__(self, url, file_system, path1, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-        # if file_system is None:
-        #     raise ValueError("Parameter 'file_system' must not be None.")
-
-        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
-        self.generate_client_request_id = True
-        self.accept_language = None
-
-        self.url = url
-        self.file_system = file_system
-        self.path1 = path1
-        self.resource = "filesystem"
-        self.version = "2019-02-02"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
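
The ``_configure`` helper resolves each pipeline policy from ``kwargs`` before falling back to an ``azure.core`` default, so a caller can replace a single policy while keeping the rest. A minimal sketch, assuming ``azure-core`` is installed; the retry settings and URL are illustrative::

    from azure.core.pipeline import policies

    # Module path taken from this file's location in the diff above.
    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated.aio._configuration_async import (
        DataLakeStorageClientConfiguration,
    )

    # Because _configure does `kwargs.get('retry_policy') or
    # policies.AsyncRetryPolicy(**kwargs)`, a pre-built policy wins for that
    # slot only; every other policy still receives its default.
    custom_retry = policies.AsyncRetryPolicy(retry_total=10, retry_backoff_factor=2)
    config = DataLakeStorageClientConfiguration(
        "https://myaccount.dfs.core.windows.net",  # illustrative account URL
        "myfs", None,
        retry_policy=custom_retry,
    )
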
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,66 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import AsyncPipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration_async import DataLakeStorageClientConfiguration
-from azure.core.exceptions import map_error
-from .operations_async import ServiceOperations
-from .operations_async import FileSystemOperations
-from .operations_async import PathOperations
-from .. import models
-
-
-class DataLakeStorageClient(object):
-    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.file.datalake.aio.operations_async.ServiceOperations
-    :ivar file_system: FileSystem operations
-    :vartype file_system: azure.storage.file.datalake.aio.operations_async.FileSystemOperations
-    :ivar path: Path operations
-    :vartype path: azure.storage.file.datalake.aio.operations_async.PathOperations
-
-    :param url: The URL of the service account, container, or blob that is the
-     target of the desired operation.
-    :type url: str
-    :param file_system: The filesystem identifier.
-    :type file_system: str
-    :param path1: The file or directory path.
-    :type path1: str
-    """
-
-    def __init__(
-            self, url, file_system, path1, **kwargs):
-
-        base_url = '{url}'
-        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
-        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-11-09'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.file_system = FileSystemOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.path = PathOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-    async def __aexit__(self, *exc_details):
-        await self._client.__aexit__(*exc_details)
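
The async variant mirrors the synchronous client but swaps in ``AsyncPipelineClient`` and the ``operations_async`` groups, exposing ``__aenter__``/``__aexit__`` instead. A hedged usage sketch; the import is the re-export shown in the aio ``__init__`` earlier, while the URL is a placeholder::

    import asyncio

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated.aio import (
        DataLakeStorageClient,
    )

    async def main():
        # __aenter__/__aexit__ above delegate to the AsyncPipelineClient.
        async with DataLakeStorageClient(
                "https://myaccount.dfs.core.windows.net/myfs?<sas>",  # illustrative
                "myfs", None) as client:
            await client.file_system.get_properties()

    asyncio.run(main())
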
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations_async import ServiceOperations
-from ._file_system_operations_async import FileSystemOperations
-from ._path_operations_async import PathOperations
-
-__all__ = [
-    'ServiceOperations',
-    'FileSystemOperations',
-    'PathOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,462 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class FileSystemOperations:
-    """FileSystemOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs):
-        """Create FileSystem.
-
-        Create a FileSystem rooted at the specified location. If the FileSystem
-        already exists, the operation fails.  This operation does not support
-        conditional HTTP requests.
-
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}'}
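
Note that ``create`` returns ``None`` on success unless a ``cls`` callback is supplied; on HTTP 201 it calls ``cls(response, None, response_headers)``, so a small hook is the only way to surface headers such as ``x-ms-namespace-enabled``. A sketch under the same assumptions as the earlier examples::

    async def create_and_inspect(client):
        # The generated method passes (raw response, body=None, header dict)
        # to cls, so returning the third argument yields the parsed headers.
        hdrs = await client.file_system.create(cls=lambda resp, body, hdrs: hdrs)
        return hdrs.get("x-ms-namespace-enabled")
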
-
-    async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Set FileSystem Properties.
-
-        Set properties for the FileSystem.  This operation supports conditional
-        HTTP requests.  For more information, see [Specifying Conditional
-        Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/{filesystem}'}
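
Only ``If-Modified-Since``/``If-Unmodified-Since`` are wired up for this operation (see the header construction above), so the conditional update is expressed as a timestamp rather than an ETag. A hedged sketch; ``ModifiedAccessConditions`` is assumed to come from the generated ``models`` package imported at the top of this file::

    import datetime

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import models

    async def set_if_unchanged_since(client, when: datetime.datetime):
        # Skip the write if the filesystem changed after `when`; on a failed
        # precondition the generated code raises StorageErrorException.
        conditions = models.ModifiedAccessConditions(if_unmodified_since=when)
        await client.file_system.set_properties(
            properties="env=cHJvZA==",  # "prod", base64-encoded per the docstring
            modified_access_conditions=conditions,
        )
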
-
-    async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs):
-        """Get FileSystem Properties.
-
-        All system and user-defined filesystem properties are specified in the
-        response headers.
-
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{filesystem}'}
-
-    async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Delete FileSystem.
-
-        Marks the FileSystem for deletion.  When a FileSystem is deleted, a
-        FileSystem with the same identifier cannot be created for at least 30
-        seconds. While the filesystem is being deleted, attempts to create a
-        filesystem with the same identifier will fail with status code 409
-        (Conflict), with the service returning additional error information
-        indicating that the filesystem is being deleted. All other operations,
-        including operations on any files or directories within the filesystem,
-        will fail with status code 404 (Not Found) while the filesystem is
-        being deleted. This operation supports conditional HTTP requests.  For
-        more information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}'}
-
-    async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs):
-        """List Paths.
-
-        List FileSystem paths and their properties.
-
-        :param recursive: Required.  If "true", all paths are listed;
-         otherwise, only paths at the root of the filesystem are listed.  If
-         "directory" is specified, the list will only include paths that share
-         the same root.
-        :type recursive: bool
-        :param continuation: Optional.  The number of paths returned with each
-         invocation is limited.  If the number of paths to be returned exceeds
-         this limit, a continuation token is returned in the x-ms-continuation
-         response header.  When a continuation token is returned in the
-         response, it must be specified in a subsequent invocation of the list
-         operation to continue listing the paths.
-        :type continuation: str
-        :param path: Optional.  Filters results to paths within the specified
-         directory. An error occurs if the directory does not exist.
-        :type path: str
-        :param max_results: An optional value that specifies the maximum
-         number of items to return. If omitted or greater than 5,000, the
-         response will include up to 5,000 items.
-        :type max_results: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PathList or the result of cls(response)
-        :rtype: ~azure.storage.file.datalake.models.PathList
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.list_paths.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if path is not None:
-            query_parameters['directory'] = self._serialize.query("path", path, 'str')
-        query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
-        if max_results is not None:
-            query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PathList', response)
-            header_dict = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_paths.metadata = {'url': '/{filesystem}'}
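
``list_paths`` returns the deserialized ``PathList`` but exposes the continuation token only through the ``x-ms-continuation`` response header (captured in ``header_dict`` above), so draining a large listing needs a ``cls`` hook that hands back both. A sketch under the same assumptions as the earlier examples::

    async def list_all_paths(client):
        paths, continuation = [], None
        while True:
            # cls receives (response, PathList, header_dict); returning the
            # last two lets the loop read x-ms-continuation next to the page.
            page, hdrs = await client.file_system.list_paths(
                recursive=True,
                continuation=continuation,
                cls=lambda resp, body, hdrs: (body, hdrs),
            )
            paths.extend(page.paths or [])  # `paths` is the PathList item list (assumed attribute name)
            continuation = hdrs.get("x-ms-continuation")
            if not continuation:
                return paths
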
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1459 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class PathOperations:
-    """PathOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
-        """Create File | Create Directory | Rename File | Rename Directory.
-
-        Create or rename a file or directory.  By default, the destination is
-        overwritten, and if the destination already exists and has a lease,
-        the lease is broken.  This operation supports conditional HTTP requests.
-        For more information, see [Specifying Conditional Headers for Blob
-        Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param resource: Required only for Create File and Create Directory.
-         The value must be "file" or "directory". Possible values include:
-         'directory', 'file'
-        :type resource: str or
-         ~azure.storage.file.datalake.models.PathResourceType
-        :param continuation: Optional.  When renaming a directory, the number
-         of paths that are renamed with each invocation is limited.  If the
-         number of paths to be renamed exceeds this limit, a continuation token
-         is returned in this response header.  When a continuation token is
-         returned in the response, it must be specified in a subsequent
-         invocation of the rename operation to continue renaming the directory.
-        :type continuation: str
-        :param mode: Optional. Valid only when namespace is enabled. This
-         parameter determines the behavior of the rename operation. The value
-         must be "legacy" or "posix", and the default value will be "posix".
-         Possible values include: 'legacy', 'posix'
-        :type mode: str or ~azure.storage.file.datalake.models.PathRenameMode
-        :param rename_source: An optional file or directory to be renamed.
-         The value must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved. This value must be a URL percent-encoded string. Note that
-         the string may only contain ASCII characters in the ISO-8859-1
-         character set.
-        :type rename_source: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param umask: Optional and only valid if Hierarchical Namespace is
-         enabled for the account. When creating a file or directory and the
-         parent folder does not have a default ACL, the umask restricts the
-         permissions of the file or directory to be created.  The resulting
-         permission is given by p & ~u (p bitwise-AND NOT u), where p is the
-         permission and u is the umask.  For example, if p is 0777 and u is
-         0057, then the resulting permission is 0720.  The default permission
-         is 0777 for a directory and 0666 for a file.  The default umask is
-         0027.  The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :type umask: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.file.datalake.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if resource is not None:
-            query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if mode is not None:
-            query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if rename_source is not None:
-            header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}/{path}'}
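
As the docstring above notes, rename is expressed through this same ``create`` call: ``rename_source`` names the source as ``"/{filesystem}/{path}"``, and a conditional ``If-None-Match: "*"`` makes the call fail if the destination already exists. A hedged sketch reusing the generated ``models`` package::

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import models

    async def rename_without_overwrite(client, source):
        # source must be "/{filesystem}/{path}", URL percent-encoded, e.g.
        # "/myfs/old-dir/report.csv" (illustrative). The destination is the
        # path the client itself was constructed against.
        conditions = models.ModifiedAccessConditions(if_none_match="*")
        await client.path.create(
            rename_source=source,
            mode="posix",
            modified_access_conditions=conditions,
        )
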
-
-    async def update(self, action, body, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Append Data | Flush Data | Set Properties | Set Access Control.
-
-        Uploads data to be appended to a file, flushes (writes) previously
-        uploaded data to a file, sets properties for a file or directory, or
-        sets access control for a file or directory. Data can only be appended
-        to a file. This operation supports conditional HTTP requests. For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param action: The action must be "append" to upload data to be
-         appended to a file, "flush" to flush previously uploaded data to a
-         file, "setProperties" to set the properties of a file or directory, or
-         "setAccessControl" to set the owner, group, permissions, or access
-         control list for a file or directory.  Note that Hierarchical
-         Namespace must be enabled for the account in order to use access
-         control.  Also note that the Access Control List (ACL) includes
-         permissions for the owner, owning group, and others, so the
-         x-ms-permissions and x-ms-acl request headers are mutually exclusive.
-         Possible values include: 'append', 'flush', 'setProperties',
-         'setAccessControl'
-        :type action: str or
-         ~azure.storage.file.datalake.models.PathUpdateAction
-        :param body: Initial data
-        :type body: Generator
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param retain_uncommitted_data: Valid only for flush operations.  If
-         "true", uncommitted data is retained after the flush operation
-         completes; otherwise, the uncommitted data is deleted after the flush
-         operation.  The default is false.  Data at offsets less than the
-         specified position are written to the file when flush succeeds, but
-         this optional parameter allows data after the flush position to be
-         retained for a future flush operation.
-        :type retain_uncommitted_data: bool
-        :param close: Azure Storage Events allow applications to receive
-         notifications when files change. When Azure Storage Events are
-         enabled, a file changed event is raised. This event has a property
-         indicating whether this is the final change to distinguish the
-         difference between an intermediate flush to a file stream and the
-         final close of a file stream. The close query parameter is valid only
-         when the action is "flush" and change notifications are enabled. If
-         the value of close is "true" and the flush operation completes
-         successfully, the service raises a file change notification with a
-         property indicating that this is the final update (the file stream has
-         been closed). If "false" a change notification is raised indicating
-         the file has changed. The default is false. This query parameter is
-         set to true by the Hadoop ABFS driver to indicate that the file stream
-         has been closed."
-        :type close: bool
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param acl: Sets POSIX access control rights on files and directories.
-         The value is a comma-separated list of access control entries. Each
-         access control entry (ACE) consists of a scope, a type, a user or
-         group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        content_md5 = None
-        if path_http_headers is not None:
-            content_md5 = path_http_headers.content_md5
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.update.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction')
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if retain_uncommitted_data is not None:
-            query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
-        if close is not None:
-            query_parameters['close'] = self._serialize.query("close", close, 'bool')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update.metadata = {'url': '/{filesystem}/{path}'}
-
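The append/flush/close semantics documented in the update operation above are easiest to see at the REST level. Below is a minimal sketch, assuming `FILE_SAS_URL` is a pre-authorized (SAS) URL to a Data Lake file and `API_VERSION` is a service version accepted by the account (both hypothetical names): it stages one chunk with action=append, then commits it with action=flush and close=true so the resulting change notification is marked as the final update.

import requests

# Hypothetical values: a SAS-authorized file URL and a service version.
FILE_SAS_URL = "https://account.dfs.core.windows.net/fs/dir/file.txt?sv=...&sig=..."
API_VERSION = "2019-02-02"

data = b"hello, data lake"

# action=append stages bytes at an offset; position=0 for a fresh file.
resp = requests.patch(
    FILE_SAS_URL + "&action=append&position=0",
    data=data,
    headers={"x-ms-version": API_VERSION},
)
resp.raise_for_status()  # 202: data staged, not yet visible in the file

# action=flush commits staged data: position must equal the final file length
# and the request must carry no body (Content-Length: 0). close=true marks the
# change notification as the final update, as the docstring above explains.
resp = requests.patch(
    FILE_SAS_URL + f"&action=flush&position={len(data)}&close=true",
    headers={"x-ms-version": API_VERSION, "Content-Length": "0"},
)
resp.raise_for_status()  # 200: ETag/Last-Modified describe the committed file
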
-    async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Lease Path.
-
-        Create and manage a lease to restrict write and delete access to the
-        path. This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param x_ms_lease_action: There are five lease actions: "acquire",
-         "break", "change", "renew", and "release". Use "acquire" and specify
-         the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a
-         new lease. Use "break" to break an existing lease. When a lease is
-         broken, the lease break period is allowed to elapse, during which time
-         no lease operation except break and release can be performed on the
-         file. When a lease is successfully broken, the response indicates the
-         interval in seconds until a new lease can be acquired. Use "change"
-         and specify the current lease ID in "x-ms-lease-id" and the new lease
-         ID in "x-ms-proposed-lease-id" to change the lease ID of an active
-         lease. Use "renew" and specify the "x-ms-lease-id" to renew an
-         existing lease. Use "release" and specify the "x-ms-lease-id" to
-         release a lease. Possible values include: 'acquire', 'break',
-         'change', 'renew', 'release'
-        :type x_ms_lease_action: str or
-         ~azure.storage.file.datalake.models.PathLeaseAction
-        :param x_ms_lease_duration: The lease duration is required to acquire
-         a lease, and specifies the duration of the lease in seconds.  The
-         lease duration must be between 15 and 60 seconds or -1 for infinite
-         lease.
-        :type x_ms_lease_duration: int
-        :param x_ms_lease_break_period: The lease break period is optional
-         when breaking a lease, and specifies the break period of the lease in
-         seconds.  The lease break duration must be between 0 and 60 seconds.
-        :type x_ms_lease_break_period: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction')
-        if x_ms_lease_duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int')
-        if x_ms_lease_break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 201, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    lease.metadata = {'url': '/{filesystem}/{path}'}
-
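As a concrete illustration of the lease actions described above, here is a sketch of "acquire" followed by "release" over raw HTTP, reusing the hypothetical SAS-URL and version names from the earlier sketch.

import uuid
import requests

PATH_SAS_URL = "https://account.dfs.core.windows.net/fs/dir/file.txt?sv=...&sig=..."  # hypothetical
API_VERSION = "2019-02-02"  # hypothetical

proposed = str(uuid.uuid4())  # proposed lease IDs must be valid GUID strings

# acquire: duration is 15-60 seconds, or -1 for an infinite lease.
resp = requests.post(
    PATH_SAS_URL,
    headers={
        "x-ms-version": API_VERSION,
        "x-ms-lease-action": "acquire",
        "x-ms-lease-duration": "15",
        "x-ms-proposed-lease-id": proposed,
    },
)
resp.raise_for_status()
lease_id = resp.headers["x-ms-lease-id"]  # echoes the proposed ID

# release: other callers are blocked from writing or deleting the path until
# the lease is released here (or expires).
resp = requests.post(
    PATH_SAS_URL,
    headers={
        "x-ms-version": API_VERSION,
        "x-ms-lease-action": "release",
        "x-ms-lease-id": lease_id,
    },
)
resp.raise_for_status()
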
-    async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Read File.
-
-        Read the contents of a file.  For read operations, range requests are
-        supported. This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param range: The HTTP Range request header specifies one or more byte
-         ranges of the resource to be retrieved.
-        :type range: str
-        :param x_ms_range_get_content_md5: Optional. When this header is set
-         to "true" and specified together with the Range header, the service
-         returns the MD5 hash for the range, as long as the range is less than
-         or equal to 4 MB in size. If this header is specified without the Range
-         header, the service returns status code 400 (Bad Request). If this
-         header is set to true when the range exceeds 4 MB in size, the service
-         returns status code 400 (Bad Request).
-        :type x_ms_range_get_content_md5: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.read.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if range is not None:
-            header_parameters['Range'] = self._serialize.header("range", range, 'str')
-        if x_ms_range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            await response.load_body()
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    read.metadata = {'url': '/{filesystem}/{path}'}
-
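The Range / x-ms-range-get-content-md5 interplay documented above can be exercised directly. A sketch with the same hypothetical names, requesting the first KiB (well under the 4 MB limit for per-range MD5):

import requests

FILE_SAS_URL = "https://account.dfs.core.windows.net/fs/dir/file.txt?sv=...&sig=..."  # hypothetical

resp = requests.get(
    FILE_SAS_URL,
    headers={
        "x-ms-version": "2019-02-02",          # hypothetical service version
        "Range": "bytes=0-1023",               # partial read -> 206
        "x-ms-range-get-content-md5": "true",  # valid: range is <= 4 MB
    },
    stream=True,  # mirror the generated client's streaming download
)
resp.raise_for_status()
print(resp.status_code)                 # 206 for a range, 200 for a full read
print(resp.headers.get("Content-MD5"))  # MD5 of just the requested range
first_chunk = next(resp.iter_content(chunk_size=1024))

Per the docstring, omitting Range while sending x-ms-range-get-content-md5, or combining it with a range larger than 4 MB, would come back as 400 (Bad Request).
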
-    async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Get Properties | Get Status | Get Access Control List.
-
-        Get Properties returns all system and user defined properties for a
-        path. Get Status returns all system defined properties for a path. Get
-        Access Control List returns the access control list for a path. This
-        operation supports conditional HTTP requests.  For more information,
-        see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param action: Optional. If the value is "getStatus" only the system
-         defined properties for the path are returned. If the value is
-         "getAccessControl" the access control list is returned in the response
-         headers (Hierarchical Namespace must be enabled for the account),
-         otherwise the properties are returned. Possible values include:
-         'getAccessControl', 'getStatus'
-        :type action: str or
-         ~azure.storage.file.datalake.models.PathGetPropertiesAction
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if action is not None:
-            query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction')
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{filesystem}/{path}'}
-
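Get Properties is the first half of the properties-merge recipe described in the update docstring earlier (fetch all existing properties plus the current E-Tag, then resend everything conditionally). A sketch of that recipe, assuming the same hypothetical names and assuming the "setProperties" value of PathUpdateAction is accepted by this API version:

import requests

PATH_SAS_URL = "https://account.dfs.core.windows.net/fs/dir/file.txt?sv=...&sig=..."  # hypothetical
HEADERS = {"x-ms-version": "2019-02-02"}  # hypothetical service version

# Get Properties is a HEAD request; everything comes back in response headers.
resp = requests.head(PATH_SAS_URL, headers=HEADERS)
resp.raise_for_status()
existing = resp.headers.get("x-ms-properties", "")  # "n1=djE=,n2=djI=" style
etag = resp.headers["ETag"]

# Merge: resend *all* properties (any omitted ones would be removed) and make
# the write conditional on the E-Tag so a concurrent update fails with 412.
merged = (existing + "," if existing else "") + "reviewed=dHJ1ZQ=="  # base64("true")
resp = requests.patch(
    PATH_SAS_URL + "&action=setProperties",
    headers={**HEADERS, "x-ms-properties": merged, "If-Match": etag},
)
resp.raise_for_status()

Adding &action=getAccessControl&upn=true to the HEAD request instead would return the ACL in x-ms-acl with user identities rendered as User Principal Names, as the upn parameter above describes.
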
-    async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Delete File | Delete Directory.
-
-        Delete the file or directory. This operation supports conditional HTTP
-        requests.  For more information, see [Specifying Conditional Headers
-        for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param recursive: Required and valid only when the resource is a
-         directory. If "true", all paths beneath the directory will be
-         deleted. If "false" and the directory is non-empty, an error occurs.
-        :type recursive: bool
-        :param continuation: Optional.  When deleting a directory, the number
-         of paths that are deleted with each invocation is limited.  If the
-         number of paths to be deleted exceeds this limit, a continuation token
-         is returned in this response header.  When a continuation token is
-         returned in the response, it must be specified in a subsequent
-         invocation of the delete operation to continue deleting the directory.
-        :type continuation: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if recursive is not None:
-            query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}/{path}'}
-
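The continuation behaviour described above implies a loop on the caller's side. A sketch, again with hypothetical names, that keeps re-issuing the delete until no x-ms-continuation header comes back:

import requests

DIR_SAS_URL = "https://account.dfs.core.windows.net/fs/some-dir?sv=...&sig=..."  # hypothetical
continuation = None

while True:
    url = DIR_SAS_URL + "&recursive=true"
    if continuation:
        # The token must be passed back verbatim (URL-encoded) to resume.
        url += "&continuation=" + requests.utils.quote(continuation, safe="")
    resp = requests.delete(url, headers={"x-ms-version": "2019-02-02"})
    resp.raise_for_status()
    # A continuation header means only part of the directory was deleted so far.
    continuation = resp.headers.get("x-ms-continuation")
    if not continuation:
        break
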
-    async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a path.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param acl: Sets POSIX access control rights on files and directories.
-         The value is a comma-separated list of access control entries. Each
-         access control entry (ACE) consists of a scope, a type, a user or
-         group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
-
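To make the "[scope:][type]:[id]:[permissions]" ACE format above concrete, here is a sketch that sets a full ACL on a file, using the hypothetical names from the earlier sketches:

import requests

PATH_SAS_URL = "https://account.dfs.core.windows.net/fs/dir/file.txt?sv=...&sig=..."  # hypothetical

# Three unnamed ACEs: an empty id targets the owning user/group; a named entry
# would look like "user:<object-id>:r-x". "default:"-scoped entries are valid
# only on directories, where they set the ACL inherited by new children.
acl = "user::rwx,group::r-x,other::---"

resp = requests.patch(
    PATH_SAS_URL + "&action=setAccessControl",
    headers={"x-ms-version": "2019-02-02", "x-ms-acl": acl},
)
resp.raise_for_status()  # 200; the service treats x-ms-acl and
                         # x-ms-permissions as mutually exclusive
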
-    async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a path.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param retain_uncommitted_data: Valid only for flush operations.  If
-         "true", uncommitted data is retained after the flush operation
-         completes; otherwise, the uncommitted data is deleted after the flush
-         operation.  The default is false.  Data at offsets less than the
-         specified position are written to the file when flush succeeds, but
-         this optional parameter allows data after the flush position to be
-         retained for a future flush operation.
-        :type retain_uncommitted_data: bool
-        :param close: Azure Storage Events allow applications to receive
-         notifications when files change. When Azure Storage Events are
-         enabled, a file changed event is raised. This event has a property
-         indicating whether this is the final change to distinguish the
-         difference between an intermediate flush to a file stream and the
-         final close of a file stream. The close query parameter is valid only
-         when the action is "flush" and change notifications are enabled. If
-         the value of close is "true" and the flush operation completes
-         successfully, the service raises a file change notification with a
-         property indicating that this is the final update (the file stream has
-         been closed). If "false", a change notification is raised indicating
-         the file has changed. The default is false. This query parameter is
-         set to true by the Hadoop ABFS driver to indicate that the file stream
-         has been closed.
-        :type close: bool
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        content_md5 = None
-        if path_http_headers is not None:
-            content_md5 = path_http_headers.content_md5
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "flush"
-
-        # Construct URL
-        url = self.flush_data.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if retain_uncommitted_data is not None:
-            query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
-        if close is not None:
-            query_parameters['close'] = self._serialize.query("close", close, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    flush_data.metadata = {'url': '/{filesystem}/{path}'}
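The flush contract spelled out above (contiguous staged uploads, a position equal to the final file length, and no request body) still governs the current azure-storage-file-datalake client. A minimal sketch of the append-then-flush sequence, assuming that package is installed; the connection string and paths are placeholders:

    from azure.storage.filedatalake import DataLakeFileClient

    # Stage chunks at explicit offsets, then commit them with a single
    # flush at the total length, mirroring the action=append /
    # action=flush PATCH requests the generated methods here build.
    file_client = DataLakeFileClient.from_connection_string(
        "<connection-string>",              # placeholder
        file_system_name="myfs",
        file_path="dir/data.bin",
    )
    offset = 0
    for chunk in (b"first-", b"second"):
        file_client.append_data(chunk, offset=offset, length=len(chunk))
        offset += len(chunk)
    file_client.flush_data(offset)          # position == total bytes staged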
-
-    async def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Append data to the file.
-
-        :param body: Initial data
-        :type body: Generator
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        transactional_content_hash = None
-        if path_http_headers is not None:
-            transactional_content_hash = path_http_headers.transactional_content_hash
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        action = "append"
-
-        # Construct URL
-        url = self.append_data.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if transactional_content_hash is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    append_data.metadata = {'url': '/{filesystem}/{path}'}
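At the wire level, append_data reduces to one PATCH request. A sketch of the equivalent raw call with the requests library; account, filesystem, and path are placeholders, and a real request additionally needs a SAS token or shared-key signature that is omitted here:

    import requests

    body = b"payload"
    resp = requests.patch(
        "https://<account>.dfs.core.windows.net/myfs/dir/data.bin",
        params={"action": "append", "position": 0},
        headers={
            "Content-Length": str(len(body)),
            "x-ms-version": "2019-07-07",
        },
        data=body,
    )
    # The generated method maps anything other than 202 to
    # StorageErrorException.
    assert resp.status_code == 202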
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,128 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ServiceOperations:
-    """ServiceOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar resource: The value must be "account" for all account operations. Constant value: "account".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.resource = "account"
-
-    async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs):
-        """List FileSystems.
-
-        List filesystems and their properties in the given account.
-
-        :param prefix: Filters results to filesystems within the specified
-         prefix.
-        :type prefix: str
-        :param continuation: Optional.  When the number of filesystems to
-         be listed exceeds the maximum allowed per response, a continuation
-         token is returned in the x-ms-continuation response header.  When
-         a continuation token is returned, it must be specified in a
-         subsequent invocation of the list operation to continue listing
-         the filesystems.
-        :type continuation: str
-        :param max_results: An optional value that specifies the maximum
-         number of items to return. If omitted or greater than 5,000, the
-         response will include up to 5,000 items.
-        :type max_results: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: FileSystemList or the result of cls(response)
-        :rtype: ~azure.storage.file.datalake.models.FileSystemList
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.list_file_systems.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if max_results is not None:
-            query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('FileSystemList', response)
-            header_dict = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_file_systems.metadata = {'url': '/'}
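The x-ms-continuation value surfaced through header_dict is what drives paging. A sketch of the loop a caller can build on top of list_file_systems, using the cls callback to capture both the items and the token; list_all_file_systems is an illustrative wrapper, not part of the removed module:

    async def list_all_file_systems(service_ops):
        items, token = [], None
        while True:
            def collect(response, deserialized, headers):
                # deserialized is a FileSystemList; headers is the
                # header_dict built above.
                items.extend(deserialized.filesystems or [])
                return headers.get("x-ms-continuation")
            token = await service_ops.list_file_systems(
                continuation=token, cls=collect)
            if not token:   # no token returned: the listing is complete
                return items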
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,58 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-try:
-    from ._models_py3 import FileSystem
-    from ._models_py3 import FileSystemList
-    from ._models_py3 import LeaseAccessConditions
-    from ._models_py3 import ModifiedAccessConditions
-    from ._models_py3 import Path
-    from ._models_py3 import PathHTTPHeaders
-    from ._models_py3 import PathList
-    from ._models_py3 import SourceModifiedAccessConditions
-    from ._models_py3 import StorageError, StorageErrorException
-    from ._models_py3 import StorageErrorError
-except (SyntaxError, ImportError):
-    from ._models import FileSystem
-    from ._models import FileSystemList
-    from ._models import LeaseAccessConditions
-    from ._models import ModifiedAccessConditions
-    from ._models import Path
-    from ._models import PathHTTPHeaders
-    from ._models import PathList
-    from ._models import SourceModifiedAccessConditions
-    from ._models import StorageError, StorageErrorException
-    from ._models import StorageErrorError
-from ._data_lake_storage_client_enums import (
-    PathGetPropertiesAction,
-    PathLeaseAction,
-    PathRenameMode,
-    PathResourceType,
-    PathUpdateAction,
-)
-
-__all__ = [
-    'FileSystem',
-    'FileSystemList',
-    'LeaseAccessConditions',
-    'ModifiedAccessConditions',
-    'Path',
-    'PathHTTPHeaders',
-    'PathList',
-    'SourceModifiedAccessConditions',
-    'StorageError', 'StorageErrorException',
-    'StorageErrorError',
-    'PathResourceType',
-    'PathRenameMode',
-    'PathUpdateAction',
-    'PathLeaseAction',
-    'PathGetPropertiesAction',
-]
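The try/except import above is the stock AutoRest pattern for straddling Python 2 and 3: _models_py3 uses syntax that only Python 3 can parse (keyword-only arguments, annotations), so merely importing it under Python 2 raises SyntaxError and the kwargs-based _models module is loaded instead. The failure mode is demonstrable with compile():

    # Under Python 2 this raises SyntaxError at parse time, which is
    # exactly what the except (SyntaxError, ImportError) clause catches.
    py3_only = "def f(*, name: str = None) -> None: pass"
    compile(py3_only, "<demo>", "exec")   # fine on py3, SyntaxError on py2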
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,47 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-class PathResourceType(str, Enum):
-
-    directory = "directory"
-    file = "file"
-
-
-class PathRenameMode(str, Enum):
-
-    legacy = "legacy"
-    posix = "posix"
-
-
-class PathUpdateAction(str, Enum):
-
-    append = "append"
-    flush = "flush"
-    set_properties = "setProperties"
-    set_access_control = "setAccessControl"
-
-
-class PathLeaseAction(str, Enum):
-
-    acquire = "acquire"
-    break_enum = "break"
-    change = "change"
-    renew = "renew"
-    release = "release"
-
-
-class PathGetPropertiesAction(str, Enum):
-
-    get_access_control = "getAccessControl"
-    get_status = "getStatus"
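Mixing str into these enums lets the serializer treat members as their wire values directly, and the break_enum spelling exists because break is a Python keyword. A quick illustration:

    from enum import Enum

    class PathLeaseAction(str, Enum):
        acquire = "acquire"
        break_enum = "break"    # "break" itself is a reserved word

    assert PathLeaseAction.break_enum == "break"         # compares as str
    assert PathLeaseAction("break") is PathLeaseAction.break_enum
    assert PathLeaseAction.acquire.value == "acquire"    # explicit wire value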
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,297 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class FileSystem(Model):
-    """FileSystem.
-
-    :param name:
-    :type name: str
-    :param last_modified:
-    :type last_modified: str
-    :param e_tag:
-    :type e_tag: str
-    """
-
-    _attribute_map = {
-        'name': {'key': 'name', 'type': 'str'},
-        'last_modified': {'key': 'lastModified', 'type': 'str'},
-        'e_tag': {'key': 'eTag', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(FileSystem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.e_tag = kwargs.get('e_tag', None)
-
-
-class FileSystemList(Model):
-    """FileSystemList.
-
-    :param filesystems:
-    :type filesystems: list[~azure.storage.file.datalake.models.FileSystem]
-    """
-
-    _attribute_map = {
-        'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'},
-    }
-
-    def __init__(self, **kwargs):
-        super(FileSystemList, self).__init__(**kwargs)
-        self.filesystems = kwargs.get('filesystems', None)
-
-
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = kwargs.get('lease_id', None)
-
-
-class ModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param if_modified_since: Specify this header value to operate only on a
-     blob if it has been modified since the specified date/time.
-    :type if_modified_since: datetime
-    :param if_unmodified_since: Specify this header value to operate only on a
-     blob if it has not been modified since the specified date/time.
-    :type if_unmodified_since: datetime
-    :param if_match: Specify an ETag value to operate only on blobs with a
-     matching value.
-    :type if_match: str
-    :param if_none_match: Specify an ETag value to operate only on blobs
-     without a matching value.
-    :type if_none_match: str
-    """
-
-    _attribute_map = {
-        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
-        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
-        'if_match': {'key': '', 'type': 'str'},
-        'if_none_match': {'key': '', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(ModifiedAccessConditions, self).__init__(**kwargs)
-        self.if_modified_since = kwargs.get('if_modified_since', None)
-        self.if_unmodified_since = kwargs.get('if_unmodified_since', None)
-        self.if_match = kwargs.get('if_match', None)
-        self.if_none_match = kwargs.get('if_none_match', None)
-
-
-class Path(Model):
-    """Path.
-
-    :param name:
-    :type name: str
-    :param is_directory: Default value: False.
-    :type is_directory: bool
-    :param last_modified:
-    :type last_modified: str
-    :param etag:
-    :type etag: str
-    :param content_length:
-    :type content_length: long
-    :param owner:
-    :type owner: str
-    :param group:
-    :type group: str
-    :param permissions:
-    :type permissions: str
-    """
-
-    _attribute_map = {
-        'name': {'key': 'name', 'type': 'str'},
-        'is_directory': {'key': 'isDirectory', 'type': 'bool'},
-        'last_modified': {'key': 'lastModified', 'type': 'str'},
-        'etag': {'key': 'etag', 'type': 'str'},
-        'content_length': {'key': 'contentLength', 'type': 'long'},
-        'owner': {'key': 'owner', 'type': 'str'},
-        'group': {'key': 'group', 'type': 'str'},
-        'permissions': {'key': 'permissions', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(Path, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.is_directory = kwargs.get('is_directory', False)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.etag = kwargs.get('etag', None)
-        self.content_length = kwargs.get('content_length', None)
-        self.owner = kwargs.get('owner', None)
-        self.group = kwargs.get('group', None)
-        self.permissions = kwargs.get('permissions', None)
-
-
-class PathHTTPHeaders(Model):
-    """Additional parameters for a set of operations, such as: Path_create,
-    Path_update, Path_flush_data, Path_append_data.
-
-    :param cache_control: Optional. Sets the blob's cache control. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type cache_control: str
-    :param content_encoding: Optional. Sets the blob's content encoding. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type content_encoding: str
-    :param content_language: Optional. Set the blob's content language. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type content_language: str
-    :param content_disposition: Optional. Sets the blob's Content-Disposition
-     header.
-    :type content_disposition: str
-    :param content_type: Optional. Sets the blob's content type. If specified,
-     this property is stored with the blob and returned with a read request.
-    :type content_type: str
-    :param content_md5: Specify the transactional md5 for the body, to be
-     validated by the service.
-    :type content_md5: bytearray
-    :param transactional_content_hash: Specify the transactional md5 for the
-     body, to be validated by the service.
-    :type transactional_content_hash: bytearray
-    """
-
-    _attribute_map = {
-        'cache_control': {'key': '', 'type': 'str'},
-        'content_encoding': {'key': '', 'type': 'str'},
-        'content_language': {'key': '', 'type': 'str'},
-        'content_disposition': {'key': '', 'type': 'str'},
-        'content_type': {'key': '', 'type': 'str'},
-        'content_md5': {'key': '', 'type': 'bytearray'},
-        'transactional_content_hash': {'key': '', 'type': 'bytearray'},
-    }
-
-    def __init__(self, **kwargs):
-        super(PathHTTPHeaders, self).__init__(**kwargs)
-        self.cache_control = kwargs.get('cache_control', None)
-        self.content_encoding = kwargs.get('content_encoding', None)
-        self.content_language = kwargs.get('content_language', None)
-        self.content_disposition = kwargs.get('content_disposition', None)
-        self.content_type = kwargs.get('content_type', None)
-        self.content_md5 = kwargs.get('content_md5', None)
-        self.transactional_content_hash = kwargs.get('transactional_content_hash', None)
-
-
-class PathList(Model):
-    """PathList.
-
-    :param paths:
-    :type paths: list[~azure.storage.file.datalake.models.Path]
-    """
-
-    _attribute_map = {
-        'paths': {'key': 'paths', 'type': '[Path]'},
-    }
-
-    def __init__(self, **kwargs):
-        super(PathList, self).__init__(**kwargs)
-        self.paths = kwargs.get('paths', None)
-
-
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for create operation.
-
-    :param source_if_match: Specify an ETag value to operate only on blobs
-     with a matching value.
-    :type source_if_match: str
-    :param source_if_none_match: Specify an ETag value to operate only on
-     blobs without a matching value.
-    :type source_if_none_match: str
-    :param source_if_modified_since: Specify this header value to operate only
-     on a blob if it has been modified since the specified date/time.
-    :type source_if_modified_since: datetime
-    :param source_if_unmodified_since: Specify this header value to operate
-     only on a blob if it has not been modified since the specified date/time.
-    :type source_if_unmodified_since: datetime
-    """
-
-    _attribute_map = {
-        'source_if_match': {'key': '', 'type': 'str'},
-        'source_if_none_match': {'key': '', 'type': 'str'},
-        'source_if_modified_since': {'key': '', 'type': 'rfc-1123'},
-        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
-    }
-
-    def __init__(self, **kwargs):
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match = kwargs.get('source_if_match', None)
-        self.source_if_none_match = kwargs.get('source_if_none_match', None)
-        self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
-        self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param error: The service error response object.
-    :type error: ~azure.storage.file.datalake.models.StorageErrorError
-    """
-
-    _attribute_map = {
-        'error': {'key': 'error', 'type': 'StorageErrorError'},
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageError, self).__init__(**kwargs)
-        self.error = kwargs.get('error', None)
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageErrorError(Model):
-    """The service error response object.
-
-    :param code: The service error code.
-    :type code: str
-    :param message: The service error message.
-    :type message: str
-    """
-
-    _attribute_map = {
-        'code': {'key': 'Code', 'type': 'str'},
-        'message': {'key': 'Message', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageErrorError, self).__init__(**kwargs)
-        self.code = kwargs.get('code', None)
-        self.message = kwargs.get('message', None)
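The empty 'key' entries in these _attribute_map dicts mark parameter groups whose attributes are flattened into individual request headers rather than serialized as a body. A sketch of how the hand-written layer fills them before a conditional flush; the import path is valid against the 1.4.0 tree this diff removes, and all values are illustrative:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated.models import (
        LeaseAccessConditions,
        ModifiedAccessConditions,
        PathHTTPHeaders,
    )

    http_headers = PathHTTPHeaders(content_type="application/octet-stream")
    lease = LeaseAccessConditions(lease_id="<lease-id>")
    conditions = ModifiedAccessConditions(if_match='"<etag>"')
    # flush_data then unpacks each group attribute by attribute into
    # x-ms-*, If-Match, and related headers, as the None-check chains
    # earlier in this diff show.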
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,297 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class FileSystem(Model):
-    """FileSystem.
-
-    :param name:
-    :type name: str
-    :param last_modified:
-    :type last_modified: str
-    :param e_tag:
-    :type e_tag: str
-    """
-
-    _attribute_map = {
-        'name': {'key': 'name', 'type': 'str'},
-        'last_modified': {'key': 'lastModified', 'type': 'str'},
-        'e_tag': {'key': 'eTag', 'type': 'str'},
-    }
-
-    def __init__(self, *, name: str=None, last_modified: str=None, e_tag: str=None, **kwargs) -> None:
-        super(FileSystem, self).__init__(**kwargs)
-        self.name = name
-        self.last_modified = last_modified
-        self.e_tag = e_tag
-
-
-class FileSystemList(Model):
-    """FileSystemList.
-
-    :param filesystems:
-    :type filesystems: list[~azure.storage.file.datalake.models.FileSystem]
-    """
-
-    _attribute_map = {
-        'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'},
-    }
-
-    def __init__(self, *, filesystems=None, **kwargs) -> None:
-        super(FileSystemList, self).__init__(**kwargs)
-        self.filesystems = filesystems
-
-
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str'},
-    }
-
-    def __init__(self, *, lease_id: str=None, **kwargs) -> None:
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = lease_id
-
-
-class ModifiedAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param if_modified_since: Specify this header value to operate only on a
-     blob if it has been modified since the specified date/time.
-    :type if_modified_since: datetime
-    :param if_unmodified_since: Specify this header value to operate only on a
-     blob if it has not been modified since the specified date/time.
-    :type if_unmodified_since: datetime
-    :param if_match: Specify an ETag value to operate only on blobs with a
-     matching value.
-    :type if_match: str
-    :param if_none_match: Specify an ETag value to operate only on blobs
-     without a matching value.
-    :type if_none_match: str
-    """
-
-    _attribute_map = {
-        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
-        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
-        'if_match': {'key': '', 'type': 'str'},
-        'if_none_match': {'key': '', 'type': 'str'},
-    }
-
-    def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None:
-        super(ModifiedAccessConditions, self).__init__(**kwargs)
-        self.if_modified_since = if_modified_since
-        self.if_unmodified_since = if_unmodified_since
-        self.if_match = if_match
-        self.if_none_match = if_none_match
-
-
-class Path(Model):
-    """Path.
-
-    :param name:
-    :type name: str
-    :param is_directory: Default value: False.
-    :type is_directory: bool
-    :param last_modified:
-    :type last_modified: str
-    :param e_tag:
-    :type e_tag: str
-    :param content_length:
-    :type content_length: long
-    :param owner:
-    :type owner: str
-    :param group:
-    :type group: str
-    :param permissions:
-    :type permissions: str
-    """
-
-    _attribute_map = {
-        'name': {'key': 'name', 'type': 'str'},
-        'is_directory': {'key': 'isDirectory', 'type': 'bool'},
-        'last_modified': {'key': 'lastModified', 'type': 'str'},
-        'e_tag': {'key': 'eTag', 'type': 'str'},
-        'content_length': {'key': 'contentLength', 'type': 'long'},
-        'owner': {'key': 'owner', 'type': 'str'},
-        'group': {'key': 'group', 'type': 'str'},
-        'permissions': {'key': 'permissions', 'type': 'str'},
-    }
-
-    def __init__(self, *, name: str=None, is_directory: bool=False, last_modified: str=None, e_tag: str=None, content_length: int=None, owner: str=None, group: str=None, permissions: str=None, **kwargs) -> None:
-        super(Path, self).__init__(**kwargs)
-        self.name = name
-        self.is_directory = is_directory
-        self.last_modified = last_modified
-        self.e_tag = e_tag
-        self.content_length = content_length
-        self.owner = owner
-        self.group = group
-        self.permissions = permissions
-
-
-class PathHTTPHeaders(Model):
-    """Additional parameters for a set of operations, such as: Path_create,
-    Path_update, Path_flush_data, Path_append_data.
-
-    :param cache_control: Optional. Sets the blob's cache control. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type cache_control: str
-    :param content_encoding: Optional. Sets the blob's content encoding. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type content_encoding: str
-    :param content_language: Optional. Set the blob's content language. If
-     specified, this property is stored with the blob and returned with a read
-     request.
-    :type content_language: str
-    :param content_disposition: Optional. Sets the blob's Content-Disposition
-     header.
-    :type content_disposition: str
-    :param content_type: Optional. Sets the blob's content type. If specified,
-     this property is stored with the blob and returned with a read request.
-    :type content_type: str
-    :param content_md5: Specify the transactional md5 for the body, to be
-     validated by the service.
-    :type content_md5: bytearray
-    :param transactional_content_hash: Specify the transactional md5 for the
-     body, to be validated by the service.
-    :type transactional_content_hash: bytearray
-    """
-
-    _attribute_map = {
-        'cache_control': {'key': '', 'type': 'str'},
-        'content_encoding': {'key': '', 'type': 'str'},
-        'content_language': {'key': '', 'type': 'str'},
-        'content_disposition': {'key': '', 'type': 'str'},
-        'content_type': {'key': '', 'type': 'str'},
-        'content_md5': {'key': '', 'type': 'bytearray'},
-        'transactional_content_hash': {'key': '', 'type': 'bytearray'},
-    }
-
-    def __init__(self, *, cache_control: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, content_type: str=None, content_md5: bytearray=None, transactional_content_hash: bytearray=None, **kwargs) -> None:
-        super(PathHTTPHeaders, self).__init__(**kwargs)
-        self.cache_control = cache_control
-        self.content_encoding = content_encoding
-        self.content_language = content_language
-        self.content_disposition = content_disposition
-        self.content_type = content_type
-        self.content_md5 = content_md5
-        self.transactional_content_hash = transactional_content_hash
-
-
-class PathList(Model):
-    """PathList.
-
-    :param paths:
-    :type paths: list[~azure.storage.file.datalake.models.Path]
-    """
-
-    _attribute_map = {
-        'paths': {'key': 'paths', 'type': '[Path]'},
-    }
-
-    def __init__(self, *, paths=None, **kwargs) -> None:
-        super(PathList, self).__init__(**kwargs)
-        self.paths = paths
-
-
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for create operation.
-
-    :param source_if_match: Specify an ETag value to operate only on blobs
-     with a matching value.
-    :type source_if_match: str
-    :param source_if_none_match: Specify an ETag value to operate only on
-     blobs without a matching value.
-    :type source_if_none_match: str
-    :param source_if_modified_since: Specify this header value to operate only
-     on a blob if it has been modified since the specified date/time.
-    :type source_if_modified_since: datetime
-    :param source_if_unmodified_since: Specify this header value to operate
-     only on a blob if it has not been modified since the specified date/time.
-    :type source_if_unmodified_since: datetime
-    """
-
-    _attribute_map = {
-        'source_if_match': {'key': '', 'type': 'str'},
-        'source_if_none_match': {'key': '', 'type': 'str'},
-        'source_if_modified_since': {'key': '', 'type': 'rfc-1123'},
-        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
-    }
-
-    def __init__(self, *, source_if_match: str=None, source_if_none_match: str=None, source_if_modified_since=None, source_if_unmodified_since=None, **kwargs) -> None:
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match = source_if_match
-        self.source_if_none_match = source_if_none_match
-        self.source_if_modified_since = source_if_modified_since
-        self.source_if_unmodified_since = source_if_unmodified_since
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param error: The service error response object.
-    :type error: ~azure.storage.file.datalake.models.StorageErrorError
-    """
-
-    _attribute_map = {
-        'error': {'key': 'error', 'type': 'StorageErrorError'},
-    }
-
-    def __init__(self, *, error=None, **kwargs) -> None:
-        super(StorageError, self).__init__(**kwargs)
-        self.error = error
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageErrorError(Model):
-    """The service error response object.
-
-    :param code: The service error code.
-    :type code: str
-    :param message: The service error message.
-    :type message: str
-    """
-
-    _attribute_map = {
-        'code': {'key': 'Code', 'type': 'str'},
-        'message': {'key': 'Message', 'type': 'str'},
-    }
-
-    def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None:
-        super(StorageErrorError, self).__init__(**kwargs)
-        self.code = code
-        self.message = message
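Once raised, StorageErrorException carries the deserialized StorageError payload. A sketch of what a caller can inspect; path_ops stands in for a generated PathOperations instance, and the values shown are illustrative:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import models

    async def flush_or_report(path_ops):
        try:
            await path_ops.flush_data(position=0, content_length=0)
        except models.StorageErrorException as exc:
            # exc.error is the StorageError model; its nested .error is
            # the StorageErrorError with the service code and message.
            print(exc.error.error.code)        # e.g. "PathNotFound"
            print(exc.error.error.message)
            print(exc.response.status_code)    # from HttpResponseError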
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations import ServiceOperations
-from ._file_system_operations import FileSystemOperations
-from ._path_operations import PathOperations
-
-__all__ = [
-    'ServiceOperations',
-    'FileSystemOperations',
-    'PathOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,462 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class FileSystemOperations(object):
-    """FileSystemOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def create(self, properties=None, request_id=None, timeout=None, cls=None, **kwargs):
-        """Create FileSystem.
-
-        Create a FileSystem rooted at the specified location. If the FileSystem
-        already exists, the operation fails.  This operation does not support
-        conditional HTTP requests.
-
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}'}
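The properties string the docstring describes (comma-separated names with base64-encoded values) has to be assembled by the caller. A small sketch; encode_properties is a hypothetical helper, not part of the removed module:

    import base64

    # Build an x-ms-properties value: "n1=v1, n2=v2" with each value
    # base64-encoded, per the create/set_properties docstrings above.
    def encode_properties(props):
        return ", ".join(
            "%s=%s" % (name,
                       base64.b64encode(val.encode("ascii")).decode("ascii"))
            for name, val in props.items()
        )

    assert encode_properties({"owner": "ops"}) == "owner=b3Bz"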
-
-    def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Set FileSystem Properties.
-
-        Set properties for the FileSystem.  This operation supports conditional
-        HTTP requests.  For more information, see [Specifying Conditional
-        Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/{filesystem}'}
-
-    def get_properties(self, request_id=None, timeout=None, cls=None, **kwargs):
-        """Get FileSystem Properties.
-
-        All system and user-defined filesystem properties are specified in the
-        response headers.
-
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{filesystem}'}
-
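Everything get_properties returns travels in response headers, so the cls callback is the only way to read them; as the code above shows, cls receives (response, deserialized body, header dict). A small sketch, with fs_ops again standing for a wired-up FileSystemOperations instance:

def _headers_only(response, deserialized, headers):
    # Return just the header dict that the operation deserialized above.
    return headers

def has_hierarchical_namespace(fs_ops):
    headers = fs_ops.get_properties(cls=_headers_only)
    return headers.get('x-ms-namespace-enabled') == 'true'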
-    def delete(self, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Delete FileSystem.
-
-        Marks the FileSystem for deletion.  When a FileSystem is deleted, a
-        FileSystem with the same identifier cannot be created for at least 30
-        seconds. While the filesystem is being deleted, attempts to create a
-        filesystem with the same identifier will fail with status code 409
-        (Conflict), with the service returning additional error information
-        indicating that the filesystem is being deleted. All other operations,
-        including operations on any files or directories within the filesystem,
-        will fail with status code 404 (Not Found) while the filesystem is
-        being deleted. This operation supports conditional HTTP requests.  For
-        more information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}'}
-
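Because the name of a deleted filesystem stays reserved for at least 30 seconds, a delete-then-recreate sequence has to tolerate the 409 (Conflict) responses described above. A sketch, under the assumption that the generated FileSystemOperations also exposes a create() operation (it is not part of this hunk):

import time
from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import models

def recreate_filesystem(fs_ops, attempts=8, delay=5):
    fs_ops.delete()
    for _ in range(attempts):
        try:
            return fs_ops.create()  # assumed sibling operation
        except models.StorageErrorException as exc:
            if exc.response.status_code != 409:
                raise
            time.sleep(delay)  # name still reserved while deletion finishes
    raise RuntimeError(
        "filesystem still deleting after %d seconds" % (attempts * delay))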
-    def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, cls=None, **kwargs):
-        """List Paths.
-
-        List FileSystem paths and their properties.
-
-        :param recursive: Required.  If "true", all paths are listed;
-         otherwise, only paths at the root of the filesystem are listed.
-        :type recursive: bool
-        :param continuation: Optional.  The number of paths returned with
-         each invocation is limited.  If the number of paths to be returned
-         exceeds this limit, a continuation token is returned in the
-         x-ms-continuation response header.  When a continuation token is
-         returned in the response, it must be specified in a subsequent
-         invocation of the list operation to continue listing the paths.
-        :type continuation: str
-        :param path: Optional.  Filters results to paths within the specified
-         directory. An error occurs if the directory does not exist.
-        :type path: str
-        :param max_results: An optional value that specifies the maximum
-         number of items to return. If omitted or greater than 5,000, the
-         response will include up to 5,000 items.
-        :type max_results: int
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: PathList or the result of cls(response)
-        :rtype: ~azure.storage.file.datalake.models.PathList
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.list_paths.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if path is not None:
-            query_parameters['directory'] = self._serialize.query("path", path, 'str')
-        query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
-        if max_results is not None:
-            query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('PathList', response)
-            header_dict = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_paths.metadata = {'url': '/{filesystem}'}
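A truncated listing only signals itself through the x-ms-continuation response header, so draining a large filesystem combines the cls hook with the continuation parameter. A sketch, assuming the PathList model exposes its items as .paths:

def iter_all_paths(fs_ops, recursive=True):
    def page_and_headers(response, deserialized, headers):
        return deserialized, headers

    continuation = None
    while True:
        page, headers = fs_ops.list_paths(recursive,
                                          continuation=continuation,
                                          cls=page_and_headers)
        for item in page.paths or []:
            yield item
        # Re-issue the request until the service stops returning a token.
        continuation = headers.get('x-ms-continuation')
        if not continuation:
            break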
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1458 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class PathOperations(object):
-    """PathOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
-        """Create File | Create Directory | Rename File | Rename Directory.
-
-        Create or rename a file or directory.  By default, the destination is
-        overwritten and, if the destination already exists and has a lease,
-        the lease is broken.  This operation supports conditional HTTP requests.
-        For more information, see [Specifying Conditional Headers for Blob
-        Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-        To fail if the destination already exists, use a conditional request
-        with If-None-Match: "*".
-
-        :param resource: Required only for Create File and Create Directory.
-         The value must be "file" or "directory". Possible values include:
-         'directory', 'file'
-        :type resource: str or
-         ~azure.storage.file.datalake.models.PathResourceType
-        :param continuation: Optional.  When renaming a directory, the number
-         of paths that are renamed with each invocation is limited.  If the
-         number of paths to be renamed exceeds this limit, a continuation token
-         is returned in this response header.  When a continuation token is
-         returned in the response, it must be specified in a subsequent
-         invocation of the rename operation to continue renaming the directory.
-        :type continuation: str
-        :param mode: Optional. Valid only when namespace is enabled. This
-         parameter determines the behavior of the rename operation. The value
-         must be "legacy" or "posix", and the default value will be "posix".
-         Possible values include: 'legacy', 'posix'
-        :type mode: str or ~azure.storage.file.datalake.models.PathRenameMode
-        :param rename_source: An optional file or directory to be renamed.
-         The value must have the following format: "/{filesystem}/{path}".  If
-         "x-ms-properties" is specified, the properties will overwrite the
-         existing properties; otherwise, the existing properties will be
-         preserved. This value must be a URL percent-encoded string. Note that
-         the string may only contain ASCII characters in the ISO-8859-1
-         character set.
-        :type rename_source: str
-        :param source_lease_id: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease_id: str
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param umask: Optional and only valid if Hierarchical Namespace is
-         enabled for the account. When creating a file or directory and the
-         parent folder does not have a default ACL, the umask restricts the
-         permissions of the file or directory to be created.  The resulting
-         permission is given by p bitwise and not u, where p is the permission
-         and u is the umask.  For example, if p is 0777 and u is 0057, then the
-         resulting permission is 0720.  The default permission is 0777 for a
-         directory and 0666 for a file.  The default umask is 0027.  The umask
-         must be specified in 4-digit octal notation (e.g. 0766).
-        :type umask: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.file.datalake.models.SourceModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-        source_if_match = None
-        if source_modified_access_conditions is not None:
-            source_if_match = source_modified_access_conditions.source_if_match
-        source_if_none_match = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match = source_modified_access_conditions.source_if_none_match
-        source_if_modified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_modified_since = source_modified_access_conditions.source_if_modified_since
-        source_if_unmodified_since = None
-        if source_modified_access_conditions is not None:
-            source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if resource is not None:
-            query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if mode is not None:
-            query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if rename_source is not None:
-            header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
-        if source_lease_id is not None:
-            header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if umask is not None:
-            header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-        if source_if_match is not None:
-            header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
-        if source_if_none_match is not None:
-            header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
-        if source_if_modified_since is not None:
-            header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
-        if source_if_unmodified_since is not None:
-            header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{filesystem}/{path}'}
-
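The umask rule in the docstring above is plain bit arithmetic: the effective permission is p & ~u. The check below reproduces the documented example, followed by a hypothetical create call (path_ops stands for a wired-up PathOperations instance):

# Default directory permission 0777 restricted by umask 0057 yields 0720.
assert 0o777 & ~0o057 == 0o720

# Create a directory with explicit POSIX permissions and umask.
path_ops.create(resource='directory', permissions='0755', umask='0027')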
-    def update(self, action, body, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Append Data | Flush Data | Set Properties | Set Access Control.
-
-        Uploads data to be appended to a file, flushes (writes) previously
-        uploaded data to a file, sets properties for a file or directory, or
-        sets access control for a file or directory. Data can only be appended
-        to a file. This operation supports conditional HTTP requests. For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param action: The action must be "append" to upload data to be
-         appended to a file, "flush" to flush previously uploaded data to a
-         file, "setProperties" to set the properties of a file or directory, or
-         "setAccessControl" to set the owner, group, permissions, or access
-         control list for a file or directory.  Note that Hierarchical
-         Namespace must be enabled for the account in order to use access
-         control.  Also note that the Access Control List (ACL) includes
-         permissions for the owner, owning group, and others, so the
-         x-ms-permissions and x-ms-acl request headers are mutually exclusive.
-         Possible values include: 'append', 'flush', 'setProperties',
-         'setAccessControl'
-        :type action: str or
-         ~azure.storage.file.datalake.models.PathUpdateAction
-        :param body: Initial data
-        :type body: Generator
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param retain_uncommitted_data: Valid only for flush operations.  If
-         "true", uncommitted data is retained after the flush operation
-         completes; otherwise, the uncommitted data is deleted after the flush
-         operation.  The default is false.  Data at offsets less than the
-         specified position are written to the file when flush succeeds, but
-         this optional parameter allows data after the flush position to be
-         retained for a future flush operation.
-        :type retain_uncommitted_data: bool
-        :param close: Azure Storage Events allow applications to receive
-         notifications when files change. When Azure Storage Events are
-         enabled, a file changed event is raised. This event has a property
-         indicating whether this is the final change to distinguish the
-         difference between an intermediate flush to a file stream and the
-         final close of a file stream. The close query parameter is valid only
-         when the action is "flush" and change notifications are enabled. If
-         the value of close is "true" and the flush operation completes
-         successfully, the service raises a file change notification with a
-         property indicating that this is the final update (the file stream has
-         been closed). If "false" a change notification is raised indicating
-         the file has changed. The default is false. This query parameter is
-         set to true by the Hadoop ABFS driver to indicate that the file stream
-         has been closed.
-        :type close: bool
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param properties: Optional. User-defined properties to be stored with
-         the filesystem, in the format of a comma-separated list of name and
-         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
-         string. Note that the string may only contain ASCII characters in the
-         ISO-8859-1 character set.  If the filesystem exists, any properties
-         not included in the list will be removed.  All properties are removed
-         if the header is omitted.  To merge new and existing properties, first
-         get all existing properties and the current E-Tag, then make a
-         conditional request with the E-Tag and include values for all
-         properties.
-        :type properties: str
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param acl: Sets POSIX access control rights on files and directories.
-         The value is a comma-separated list of access control entries. Each
-         access control entry (ACE) consists of a scope, a type, a user or
-         group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        content_md5 = None
-        if path_http_headers is not None:
-            content_md5 = path_http_headers.content_md5
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.update.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction')
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if retain_uncommitted_data is not None:
-            query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
-        if close is not None:
-            query_parameters['close'] = self._serialize.query("close", close, 'bool')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if properties is not None:
-            header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update.metadata = {'url': '/{filesystem}/{path}'}
-
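update() multiplexes four actions through a single method; uploading a file is the append-then-flush pair described above, where the flush must carry content_length=0, a position equal to the final file length, and no request body. A minimal sketch:

def upload_small_file(path_ops, data):
    # Append the whole payload at offset 0; Content-Length must equal the
    # body length for an "append".
    path_ops.update('append', iter([data]), position=0,
                    content_length=len(data))
    # Flush commits the uncommitted data; the body stays empty and
    # close=True marks this as the final change for change notifications.
    path_ops.update('flush', iter([b'']), position=len(data),
                    content_length=0, close=True)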
-    def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Lease Path.
-
-        Create and manage a lease to restrict write and delete access to the
-        path. This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param x_ms_lease_action: There are five lease actions: "acquire",
-         "break", "change", "renew", and "release". Use "acquire" and specify
-         the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a
-         new lease. Use "break" to break an existing lease. When a lease is
-         broken, the lease break period is allowed to elapse, during which time
-         no lease operation except break and release can be performed on the
-         file. When a lease is successfully broken, the response indicates the
-         interval in seconds until a new lease can be acquired. Use "change"
-         and specify the current lease ID in "x-ms-lease-id" and the new lease
-         ID in "x-ms-proposed-lease-id" to change the lease ID of an active
-         lease. Use "renew" and specify the "x-ms-lease-id" to renew an
-         existing lease. Use "release" and specify the "x-ms-lease-id" to
-         release a lease. Possible values include: 'acquire', 'break',
-         'change', 'renew', 'release'
-        :type x_ms_lease_action: str or
-         ~azure.storage.file.datalake.models.PathLeaseAction
-        :param x_ms_lease_duration: The lease duration is required to acquire
-         a lease, and specifies the duration of the lease in seconds.  The
-         lease duration must be between 15 and 60 seconds or -1 for infinite
-         lease.
-        :type x_ms_lease_duration: int
-        :param x_ms_lease_break_period: The lease break period duration is
-         optional to break a lease, and specifies the break period of the
-         lease in seconds.  The lease break duration must be between 0 and 60
-         seconds.
-        :type x_ms_lease_break_period: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The Blob service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction')
-        if x_ms_lease_duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int')
-        if x_ms_lease_break_period is not None:
-            header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 201, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    lease.metadata = {'url': '/{filesystem}/{path}'}
-
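A sketch of the acquire/release lifecycle described above: the new lease ID comes back in the x-ms-lease-id response header (captured via cls), and release feeds it back through LeaseAccessConditions:

import contextlib
import uuid
from azure.multiapi.storagev2.filedatalake.v2019_07_07._generated import models

@contextlib.contextmanager
def path_lease(path_ops, duration=15):
    proposed = str(uuid.uuid4())
    headers = path_ops.lease('acquire',
                             x_ms_lease_duration=duration,
                             proposed_lease_id=proposed,
                             cls=lambda resp, body, hdrs: hdrs)
    lease_id = headers['x-ms-lease-id']
    try:
        yield lease_id
    finally:
        path_ops.lease('release',
                       lease_access_conditions=models.LeaseAccessConditions(
                           lease_id=lease_id))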
-    def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Read File.
-
-        Read the contents of a file.  For read operations, range requests are
-        supported. This operation supports conditional HTTP requests.  For more
-        information, see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param range: The HTTP Range request header specifies one or more byte
-         ranges of the resource to be retrieved.
-        :type range: str
-        :param x_ms_range_get_content_md5: Optional. When this header is set
-         to "true" and specified together with the Range header, the service
-         returns the MD5 hash for the range, as long as the range is less than
-         or equal to 4 MB in size. If this header is specified without the Range
-         header, the service returns status code 400 (Bad Request). If this
-         header is set to true when the range exceeds 4 MB in size, the service
-         returns status code 400 (Bad Request).
-        :type x_ms_range_get_content_md5: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.read.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if range is not None:
-            header_parameters['Range'] = self._serialize.header("range", range, 'str')
-        if x_ms_range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    read.metadata = {'url': '/{filesystem}/{path}'}
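
A minimal usage sketch of the read operation above (hypothetical throughout: `path_ops` stands for an already-constructed instance of this generated PathOperations class; building one is not shown in this module):

# Hypothetical sketch, not part of the generated module: `path_ops` is an
# assumed PathOperations instance wired to a pipeline and account URL.
stream = path_ops.read(range='bytes=0-1023')   # service answers 206 Partial Content
first_kib = b''.join(stream)                   # drain the download generator
assert len(first_kib) <= 1024
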
-
-    def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Get Properties | Get Status | Get Access Control List.
-
-        Get Properties returns all system and user defined properties for a
-        path. Get Status returns all system defined properties for a path. Get
-        Access Control List returns the access control list for a path. This
-        operation supports conditional HTTP requests.  For more information,
-        see [Specifying Conditional Headers for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param action: Optional. If the value is "getStatus" only the system
-         defined properties for the path are returned. If the value is
-         "getAccessControl" the access control list is returned in the response
-         headers (Hierarchical Namespace must be enabled for the account),
-         otherwise the properties are returned. Possible values include:
-         'getAccessControl', 'getStatus'
-        :type action: str or
-         ~azure.storage.file.datalake.models.PathGetPropertiesAction
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if action is not None:
-            query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction')
-        if upn is not None:
-            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
-                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
-                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
-                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
-                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
-                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
-                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{filesystem}/{path}'}
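
Because get_properties returns None unless `cls` is supplied, reaching the deserialized headers takes a small callback; a sketch under the same `path_ops` assumption:

# Hypothetical sketch: capture the deserialized response headers via `cls`.
def keep_headers(response, deserialized, headers):
    return headers

headers = path_ops.get_properties(action='getAccessControl', upn=True,
                                  cls=keep_headers)
print(headers['x-ms-acl'])   # e.g. "user::rwx,group::r-x,other::---"
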
-
-    def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Delete File | Delete Directory.
-
-        Delete the file or directory. This operation supports conditional HTTP
-        requests.  For more information, see [Specifying Conditional Headers
-        for Blob Service
-        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
-
-        :param recursive: Required and valid only when the resource is a
-         directory. If "true", all paths beneath the directory will be
-         deleted. If "false" and the directory is non-empty, an error occurs.
-        :type recursive: bool
-        :param continuation: Optional.  When deleting a directory, the number
-         of paths that are deleted with each invocation is limited.  If the
-         number of paths to be deleted exceeds this limit, a continuation token
-         is returned in this response header.  When a continuation token is
-         returned in the response, it must be specified in a subsequent
-         invocation of the delete operation to continue deleting the directory.
-        :type continuation: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if recursive is not None:
-            query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{filesystem}/{path}'}
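
The continuation behaviour described in the delete docstring implies a retry loop when removing large directories; a sketch, again assuming `path_ops`:

# Hypothetical sketch: keep re-issuing delete until the service stops
# returning an x-ms-continuation token for the directory.
def keep_headers(response, deserialized, headers):
    return headers

token = None
while True:
    hdrs = path_ops.delete(recursive=True, continuation=token, cls=keep_headers)
    token = hdrs.get('x-ms-continuation')
    if not token:
        break
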
-
-    def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a path.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param owner: Optional. The owner of the blob or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the blob or directory.
-        :type group: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param acl: Sets POSIX access control rights on files and directories.
-         The value is a comma-separated list of access control entries. Each
-         access control entry (ACE) consists of a scope, a type, a user or
-         group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-        :type acl: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "setAccessControl"
-
-        # Construct URL
-        url = self.set_access_control.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if owner is not None:
-            header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
-        if group is not None:
-            header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
-        if permissions is not None:
-            header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
-        if acl is not None:
-            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
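
The ACL wire format quoted in the docstring, "[scope:][type]:[id]:[permissions]", is easiest to see with a concrete value; the identifiers below are purely illustrative:

# Hypothetical sketch: a three-entry POSIX ACL plus one named-user entry.
acl = 'user::rwx,group::r-x,other::---,user:0a1b2c3d-0000-0000-0000-000000000000:r--'
path_ops.set_access_control(acl=acl)

# Alternatively, octal permissions without a full ACL:
path_ops.set_access_control(owner='$superuser', permissions='0750')
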
-
-    def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
-        """Set the owner, group, permissions, or access control list for a path.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param retain_uncommitted_data: Valid only for flush operations.  If
-         "true", uncommitted data is retained after the flush operation
-         completes; otherwise, the uncommitted data is deleted after the flush
-         operation.  The default is false.  Data at offsets less than the
-         specified position are written to the file when flush succeeds, but
-         this optional parameter allows data after the flush position to be
-         retained for a future flush operation.
-        :type retain_uncommitted_data: bool
-        :param close: Azure Storage Events allow applications to receive
-         notifications when files change. When Azure Storage Events are
-         enabled, a file changed event is raised. This event has a property
-         indicating whether this is the final change to distinguish the
-         difference between an intermediate flush to a file stream and the
-         final close of a file stream. The close query parameter is valid only
-         when the action is "flush" and change notifications are enabled. If
-         the value of close is "true" and the flush operation completes
-         successfully, the service raises a file change notification with a
-         property indicating that this is the final update (the file stream has
-         been closed). If "false" a change notification is raised indicating
-         the file has changed. The default is false. This query parameter is
-         set to true by the Hadoop ABFS driver to indicate that the file stream
-         has been closed."
-        :type close: bool
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param modified_access_conditions: Additional parameters for the
-         operation
-        :type modified_access_conditions:
-         ~azure.storage.file.datalake.models.ModifiedAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        content_md5 = None
-        if path_http_headers is not None:
-            content_md5 = path_http_headers.content_md5
-        cache_control = None
-        if path_http_headers is not None:
-            cache_control = path_http_headers.cache_control
-        content_type = None
-        if path_http_headers is not None:
-            content_type = path_http_headers.content_type
-        content_disposition = None
-        if path_http_headers is not None:
-            content_disposition = path_http_headers.content_disposition
-        content_encoding = None
-        if path_http_headers is not None:
-            content_encoding = path_http_headers.content_encoding
-        content_language = None
-        if path_http_headers is not None:
-            content_language = path_http_headers.content_language
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-        if_match = None
-        if modified_access_conditions is not None:
-            if_match = modified_access_conditions.if_match
-        if_none_match = None
-        if modified_access_conditions is not None:
-            if_none_match = modified_access_conditions.if_none_match
-        if_modified_since = None
-        if modified_access_conditions is not None:
-            if_modified_since = modified_access_conditions.if_modified_since
-        if_unmodified_since = None
-        if modified_access_conditions is not None:
-            if_unmodified_since = modified_access_conditions.if_unmodified_since
-
-        action = "flush"
-
-        # Construct URL
-        url = self.flush_data.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if retain_uncommitted_data is not None:
-            query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
-        if close is not None:
-            query_parameters['close'] = self._serialize.query("close", close, 'bool')
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        if cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
-        if content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
-        if content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
-        if content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
-        if content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
-        if if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
-        if if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
-        if if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    flush_data.metadata = {'url': '/{filesystem}/{path}'}
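
Given the position and Content-Length rules spelled out above, a minimal flush looks like this (sketch; `total_len` is the assumed length of everything appended so far):

# Hypothetical sketch: commit all previously appended bytes. position must
# equal the final file length and Content-Length must be 0 for a flush.
total_len = 4096
path_ops.flush_data(position=total_len, content_length=0, close=True)
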
-
-    def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Append data to the file.
-
-        :param body: The data to append to the file.
-        :type body: Generator
-        :param position: This parameter allows the caller to upload data in
-         parallel and control the order in which it is appended to the file.
-         It is required when uploading data to be appended to the file and when
-         flushing previously uploaded data to the file.  The value must be the
-         position where the data is to be appended.  Uploaded data is not
-         immediately flushed, or written, to the file.  To flush, the
-         previously uploaded data must be contiguous, the position parameter
-         must be specified and equal to the length of the file after all data
-         has been written, and there must not be a request entity body included
-         with the request.
-        :type position: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param content_length: Required for "Append Data" and "Flush Data".
-         Must be 0 for "Flush Data".  Must be the length of the request content
-         in bytes for "Append Data".
-        :type content_length: long
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param path_http_headers: Additional parameters for the operation
-        :type path_http_headers:
-         ~azure.storage.file.datalake.models.PathHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.file.datalake.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        transactional_content_hash = None
-        if path_http_headers is not None:
-            transactional_content_hash = path_http_headers.transactional_content_hash
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        action = "append"
-
-        # Construct URL
-        url = self.append_data.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if position is not None:
-            query_parameters['position'] = self._serialize.query("position", position, 'long')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['action'] = self._serialize.query("action", action, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        if content_length is not None:
-            header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if transactional_content_hash is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-            }
-            return cls(response, None, response_headers)
-    append_data.metadata = {'url': '/{filesystem}/{path}'}
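
Append calls stage bytes at explicit offsets, and nothing becomes readable until a flush (see the flush_data sketch above); a sketch of two sequential appends under the same `path_ops` assumption:

# Hypothetical sketch: stage two chunks back to back, then flush at the end.
chunk_a, chunk_b = b'a' * 2048, b'b' * 2048
path_ops.append_data(iter([chunk_a]), position=0, content_length=len(chunk_a))
path_ops.append_data(iter([chunk_b]), position=len(chunk_a), content_length=len(chunk_b))
path_ops.flush_data(position=len(chunk_a) + len(chunk_b), content_length=0)
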
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,128 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ServiceOperations(object):
-    """ServiceOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar resource: The value must be "account" for all account operations. Constant value: "account".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.resource = "account"
-
-    def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, cls=None, **kwargs):
-        """List FileSystems.
-
-        List filesystems and their properties in given account.
-
-        :param prefix: Filters results to filesystems within the specified
-         prefix.
-        :type prefix: str
-        :param continuation: Optional. When the number of filesystems to be
-         listed exceeds the limit for a single invocation, a continuation token
-         is returned in the x-ms-continuation response header. When a
-         continuation token is returned, it must be specified in a subsequent
-         invocation of the list operation to continue listing filesystems.
-        :type continuation: str
-        :param max_results: An optional value that specifies the maximum
-         number of items to return. If omitted or greater than 5,000, the
-         response will include up to 5,000 items.
-        :type max_results: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-         Timeouts for Blob Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: FileSystemList or the result of cls(response)
-        :rtype: ~azure.storage.file.datalake.models.FileSystemList
-        :raises:
-         :class:`StorageErrorException<azure.storage.file.datalake.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.list_file_systems.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if continuation is not None:
-            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
-        if max_results is not None:
-            query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('FileSystemList', response)
-            header_dict = {
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_file_systems.metadata = {'url': '/'}
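
Since the continuation token for listing rides in the x-ms-continuation header, paging through all filesystems needs a `cls` callback; a sketch assuming `service_ops` is an instance of this ServiceOperations class and that the generated FileSystemList model exposes a `filesystems` attribute:

# Hypothetical sketch: page through every filesystem in the account.
def keep_both(response, deserialized, headers):
    return deserialized, headers

marker = None
while True:
    page, hdrs = service_ops.list_file_systems(max_results=100,
                                               continuation=marker,
                                               cls=keep_both)
    for fs in page.filesystems or []:
        print(fs.name)
    marker = hdrs.get('x-ms-continuation')
    if not marker:
        break
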
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-VERSION = "2018-11-09"
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,535 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-from enum import Enum
-
-from azure.core.paging import PageIterator
-from azure.storage.blob import LeaseProperties as BlobLeaseProperties
-from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
-from azure.storage.blob import ResourceTypes as BlobResourceTypes
-from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
-from azure.storage.blob import ContentSettings as BlobContentSettings
-from azure.storage.blob import ContainerSasPermissions, BlobSasPermissions
-from azure.storage.blob._generated.models import StorageErrorException
-from azure.storage.blob._models import ContainerPropertiesPaged
-from ._deserialize import return_headers_and_deserialized_path_list
-from ._generated.models import Path
-from ._shared.models import DictMixin
-from ._shared.response_handlers import process_storage_error
-
-
-class FileSystemProperties(object):
-    """File System properties class.
-
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the file system was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
-        Stores all the lease information for the file system.
-    :ivar str public_access: Specifies whether data in the file system may be accessed
-        publicly and the level of access.
-    :ivar bool has_immutability_policy:
-        Represents whether the file system has an immutability policy.
-    :ivar bool has_legal_hold:
-        Represents whether the file system has a legal hold.
-    :ivar dict metadata: A dict with name-value pairs to associate with the
-        file system as metadata.
-
-    Returned ``FileSystemProperties`` instances expose these values through a
-    dictionary interface, for example: ``file_system_props["last_modified"]``.
-    Additionally, the file system name is available as ``file_system_props["name"]``.
-    """
-    def __init__(self):
-        self.name = None
-        self.last_modified = None
-        self.etag = None
-        self.lease = None
-        self.public_access = None
-        self.has_immutability_policy = None
-        self.has_legal_hold = None
-        self.metadata = None
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.last_modified = generated.properties.last_modified
-        props.etag = generated.properties.etag
-        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
-        props.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
-            generated.properties.public_access)
-        props.has_immutability_policy = generated.properties.has_immutability_policy
-        props.has_legal_hold = generated.properties.has_legal_hold
-        props.metadata = generated.metadata
-        return props
-
-    @classmethod
-    def _convert_from_container_props(cls, container_properties):
-        container_properties.__class__ = cls
-        container_properties.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
-            container_properties.public_access)
-        container_properties.lease.__class__ = LeaseProperties
-        return container_properties
-
-
-class FileSystemPropertiesPaged(ContainerPropertiesPaged):
-    """An Iterable of File System properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file system name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only file systems whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of file system names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(FileSystemPropertiesPaged, self).__init__(
-            *args,
-            **kwargs
-        )
-
-    @staticmethod
-    def _build_item(item):
-        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
-
-
-class DirectoryProperties(DictMixin):
-    """
-    :ivar str name: name of the directory
-    :ivar str etag: The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool deleted: Whether the current directory is marked as deleted.
-    :ivar dict metadata: Name-value pairs associated with the directory as metadata.
-    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
-        Stores all the lease information for the directory.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar ~datetime.datetime creation_time:
-        Indicates when the directory was created, in UTC.
-    :ivar int remaining_retention_days: The number of days that the directory will be retained
-        before being permanently deleted by the service.
-    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the directory.
-    """
-    def __init__(self, **kwargs):
-        super(DirectoryProperties, self).__init__(
-            **kwargs
-        )
-        self.name = None
-        self.etag = None
-        self.deleted = None
-        self.metadata = None
-        self.lease = None
-        self.last_modified = None
-        self.creation_time = None
-        self.deleted_time = None
-        self.remaining_retention_days = None
-
-    @classmethod
-    def _from_blob_properties(cls, blob_properties):
-        directory_props = DirectoryProperties()
-        directory_props.name = blob_properties.name
-        directory_props.etag = blob_properties.etag
-        directory_props.deleted = blob_properties.deleted
-        directory_props.metadata = blob_properties.metadata
-        directory_props.lease = blob_properties.lease
-        directory_props.lease.__class__ = LeaseProperties
-        directory_props.last_modified = blob_properties.last_modified
-        directory_props.creation_time = blob_properties.creation_time
-        directory_props.deleted_time = blob_properties.deleted_time
-        directory_props.remaining_retention_days = blob_properties.remaining_retention_days
-        return directory_props
-
-
-class FileProperties(DictMixin):
-    """
-    :ivar str name: name of the file
-    :ivar str etag: The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool deleted: Whether the current file is marked as deleted.
-    :ivar dict metadata: Name-value pairs associated with the file as metadata.
-    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
-        Stores all the lease information for the file.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar ~datetime.datetime creation_time:
-        Indicates when the file was created, in UTC.
-    :ivar int size: size of the file
-    :ivar int remaining_retention_days: The number of days that the file will be retained
-        before being permanently deleted by the service.
-    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the file.
-    """
-    def __init__(self, **kwargs):
-        super(FileProperties, self).__init__(
-            **kwargs
-        )
-        self.name = None
-        self.etag = None
-        self.deleted = None
-        self.metadata = None
-        self.lease = None
-        self.last_modified = None
-        self.creation_time = None
-        self.size = None
-        self.deleted_time = None
-        self.remaining_retention_days = None
-        self.content_settings = None
-
-    @classmethod
-    def _from_blob_properties(cls, blob_properties):
-        file_props = FileProperties()
-        file_props.name = blob_properties.name
-        file_props.etag = blob_properties.etag
-        file_props.deleted = blob_properties.deleted
-        file_props.metadata = blob_properties.metadata
-        file_props.lease = blob_properties.lease
-        file_props.lease.__class__ = LeaseProperties
-        file_props.last_modified = blob_properties.last_modified
-        file_props.creation_time = blob_properties.creation_time
-        file_props.size = blob_properties.size
-        file_props.deleted_time = blob_properties.deleted_time
-        file_props.remaining_retention_days = blob_properties.remaining_retention_days
-        file_props.content_settings = blob_properties.content_settings
-        return file_props
-
-
-class PathProperties(object):
-    """Path properties listed by get_paths api.
-
-    :ivar str name: the full path for a file or directory.
-    :ivar str owner: The owner of the file or directory.
-    :ivar str group: The owning group of the file or directory.
-    :ivar str permissions: Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-    :ivar datetime last_modified: A datetime object representing the last time the path was modified.
-    :ivar bool is_directory: Whether the path is a directory.
-    :ivar str etag: The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length: The size of the file if the path is a file.
-    """
-    def __init__(self, **kwargs):
-        super(PathProperties, self).__init__(
-            **kwargs
-        )
-        self.name = kwargs.pop('name', None)
-        self.owner = kwargs.get('owner', None)
-        self.group = kwargs.get('group', None)
-        self.permissions = kwargs.get('permissions', None)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.is_directory = kwargs.get('is_directory', False)
-        self.etag = kwargs.get('etag', None)
-        self.content_length = kwargs.get('content_length', None)
-
-    @classmethod
-    def _from_generated(cls, generated):
-        path_prop = PathProperties()
-        path_prop.name = generated.name
-        path_prop.owner = generated.owner
-        path_prop.group = generated.group
-        path_prop.permissions = generated.permissions
-        path_prop.last_modified = generated.last_modified
-        path_prop.is_directory = bool(generated.is_directory)
-        path_prop.etag = generated.additional_properties.get('etag')
-        path_prop.content_length = generated.content_length
-        return path_prop
-
-
-class PathPropertiesPaged(PageIterator):
-    """An Iterable of Path properties.
-
-    :ivar str path: Filters the results to return only paths under the specified path.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str path: Filters the results to return only paths under the specified path.
-    :param int max_results: The maximum number of paths to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(
-            self, command,
-            recursive,
-            path=None,
-            max_results=None,
-            continuation_token=None,
-            upn=None):
-        super(PathPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.recursive = recursive
-        self.results_per_page = max_results
-        self.path = path
-        self.upn = upn
-        self.current_page = None
-        self.path_list = None
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                self.recursive,
-                continuation=continuation_token or None,
-                path=self.path,
-                max_results=self.results_per_page,
-                upn=self.upn,
-                cls=return_headers_and_deserialized_path_list)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.path_list, self._response = get_next_return
-        self.current_page = [self._build_item(item) for item in self.path_list]
-
-        return self._response['continuation'] or None, self.current_page
-
-    @staticmethod
-    def _build_item(item):
-        if isinstance(item, PathProperties):
-            return item
-        if isinstance(item, Path):
-            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
-            return path
-        return item
-
-
-class LeaseProperties(BlobLeaseProperties):
-    """DataLake Lease Properties.
-
-    :ivar str status:
-        The lease status of the file. Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the file. Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a file is leased, specifies whether the lease is of infinite or fixed duration.
-    """
-    def __init__(self):
-        self.status = None
-        self.state = None
-        self.duration = None
-
-
-class ContentSettings(BlobContentSettings):
-    """The content settings of a file or directory.
-
-    :ivar str content_type:
-        The content type specified for the file or directory. If no content type was
-        specified, the default content type is application/octet-stream.
-    :ivar str content_encoding:
-        If the content_encoding has previously been set
-        for the file, that value is stored.
-    :ivar str content_language:
-        If the content_language has previously been set
-        for the file, that value is stored.
-    :ivar str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :ivar str cache_control:
-        If the cache_control has previously been set for
-        the file, that value is stored.
-    :ivar str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    """
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None, **kwargs):
-        super(ContentSettings, self).__init__(
-            content_type=content_type,
-            content_encoding=content_encoding,
-            content_language=content_language,
-            content_disposition=content_disposition,
-            cache_control=cache_control,
-            content_md5=content_md5,
-            **kwargs
-        )
-
-
-class AccountSasPermissions(BlobAccountSasPermissions):
-    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
-                 create=False):
-        super(AccountSasPermissions, self).__init__(
-            read=read, create=create, write=write, list=list,
-            delete=delete
-        )
-
-
-class FileSystemSasPermissions(ContainerSasPermissions):
-    """FileSystemSasPermissions class to be used with the
-    :func:`~azure.storage.filedatalake.generate_file_system_sas` function.
-
-    :param bool read:
-        Read the content, properties, metadata etc.
-    :param bool write:
-        Create or write content, properties, metadata. Lease the file system.
-    :param bool delete:
-        Delete the file system.
-    :param bool list:
-        List paths in the file system.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False  # pylint: disable=redefined-builtin
-                 ):
-        super(FileSystemSasPermissions, self).__init__(
-            read=read, write=write, delete=delete, list=list
-        )
-
-
-class DirectorySasPermissions(BlobSasPermissions):
-    """DirectorySasPermissions class to be used with the
-    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
-
-    :param bool read:
-        Read the content, properties, metadata etc.
-    :param bool create:
-        Create a new directory
-    :param bool write:
-        Create or write content, properties, metadata. Lease the directory.
-    :param bool delete:
-        Delete the directory.
-    """
-    def __init__(self, read=False, create=False, write=False,
-                 delete=False):
-        super(DirectorySasPermissions, self).__init__(
-            read=read, create=create, write=write,
-            delete=delete
-        )
-
-
-class FileSasPermissions(BlobSasPermissions):
-    """FileSasPermissions class to be used with the
-    :func:`~azure.storage.filedatalake.generate_file_sas` function.
-
-    :param bool read:
-        Read the content, properties, metadata etc. Use the file as
-        the source of a read operation.
-    :param bool create:
-        Write a new file
-    :param bool write:
-        Create or write content, properties, metadata. Lease the file.
-    :param bool delete:
-        Delete the file.
-    """
-    def __init__(self, read=False, create=False, write=False,
-                 delete=False):
-        super(FileSasPermissions, self).__init__(
-            read=read, create=create, write=write,
-            delete=delete
-        )
-
-
-class ResourceTypes(BlobResourceTypes):
-    """
-    Specifies the resource types that are accessible with the account SAS.
-
-    :param bool service:
-        Access to service-level APIs (e.g. List File Systems)
-    :param bool file_system:
-        Access to file_system-level APIs (e.g., Create/Delete file system,
-        List Directories/Files)
-    :param bool object:
-        Access to object-level APIs for
-        files (e.g. Create File, etc.)
-    """
-    def __init__(self, service=False, file_system=False, object=False  # pylint: disable=redefined-builtin
-                 ):
-        super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
-
-
-class UserDelegationKey(BlobUserDelegationKey):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        super(UserDelegationKey, self).__init__()
-
-
-class PublicAccess(str, Enum):
-    """
-    Specifies whether data in the file system may be accessed publicly and the level of access.
-    """
-
-    OFF = 'off'
-    """
-    Specifies that there is no public read access for either the file system or the files within it.
-    Clients cannot enumerate the file systems within the storage account, nor the files within the file system.
-    """
-
-    File = 'blob'
-    """
-    Specifies public read access for files. File data within this file system can be read
-    via anonymous request, but file system data is not available. Clients cannot enumerate
-    files within the file system via anonymous request.
-    """
-
-    FileSystem = 'container'
-    """
-    Specifies full public read access for file system and file data. Clients can enumerate
-    files within the file system via anonymous request, but cannot enumerate file systems
-    within the storage account.
-    """
-
-    @classmethod
-    def _from_generated(cls, public_access):
-        if public_access == "blob":  # pylint:disable=no-else-return
-            return cls.File
-        elif public_access == "container":
-            return cls.FileSystem
-        elif public_access == "off":
-            return cls.OFF
-        return None
-
-
-class LocationMode(object):
-    """
-    Specifies the location the request should be sent to. This mode only applies
-    for RA-GRS accounts which allow secondary read access. All other account types
-    must use PRIMARY.
-    """
-
-    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
-    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
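
For reference, the removed _models.py maps DataLake names onto their blob-service
counterparts (file system -> container, path -> blob). A minimal, self-contained
sketch of the PublicAccess._from_generated translation deleted above; this is a
re-statement for illustration only, not part of the package:

    from enum import Enum

    class PublicAccess(str, Enum):
        OFF = 'off'
        File = 'blob'
        FileSystem = 'container'

        @classmethod
        def _from_generated(cls, public_access):
            # The service speaks blob-level terms; surface them as DataLake names.
            if public_access == "blob":
                return cls.File
            if public_access == "container":
                return cls.FileSystem
            if public_access == "off":
                return cls.OFF
            return None

    # The DataLake enum members deliberately re-label the blob wire values:
    assert PublicAccess._from_generated("container") is PublicAccess.FileSystem
    assert PublicAccess._from_generated("blob") is PublicAccess.File
    assert PublicAccess._from_generated(None) is None
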
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,653 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-try:
-    from urllib.parse import urlparse, quote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-
-from azure.storage.blob import BlobClient
-from ._shared.base_client import StorageAccountHostsMixin, parse_query
-from ._shared.response_handlers import return_response_headers
-from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \
-    get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions
-from ._models import LocationMode, DirectoryProperties
-from ._generated import DataLakeStorageClient
-from ._data_lake_lease import DataLakeLeaseClient
-from ._generated.models import StorageErrorException
-from ._deserialize import process_storage_error
-
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
-    'The require_encryption flag is set, but encryption is not supported'
-    ' for this method.')
-
-
-class PathClient(StorageAccountHostsMixin):
-    def __init__(
-            self, account_url,  # type: str
-            file_system_name,  # type: str
-            path_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-
-        # remove the preceding/trailing delimiter from the path components
-        file_system_name = file_system_name.strip('/')
-        path_name = path_name.strip('/')
-        if not (file_system_name and path_name):
-            raise ValueError("Please specify a container name and blob name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        blob_account_url = convert_dfs_url_to_blob_url(account_url)
-        self._blob_account_url = blob_account_url
-
-        datalake_hosts = kwargs.pop('_hosts', None)
-        blob_hosts = None
-        if datalake_hosts:
-            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
-            blob_secondary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.SECONDARY])
-            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url,
-                          LocationMode.SECONDARY: blob_secondary_account_url}
-        self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
-                                       credential=credential, _hosts=blob_hosts, **kwargs)
-
-        _, sas_token = parse_query(parsed_url.query)
-        self.file_system_name = file_system_name
-        self.path_name = path_name
-
-        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
-
-        super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
-                                         _hosts=datalake_hosts, **kwargs)
-        self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline)
-
-    def _format_url(self, hostname):
-        file_system_name = self.file_system_name
-        if isinstance(file_system_name, six.text_type):
-            file_system_name = file_system_name.encode('UTF-8')
-        return "{}://{}/{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(file_system_name),
-            quote(self.path_name, safe='~'),
-            self._query_str)
-
-    def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs):
-        # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-
-        path_http_headers = None
-        if content_settings:
-            path_http_headers = get_path_http_headers(content_settings)
-
-        options = {
-            'resource': resource_type,
-            'properties': add_metadata_headers(metadata),
-            'permissions': kwargs.pop('permissions', None),
-            'umask': kwargs.pop('umask', None),
-            'path_http_headers': path_http_headers,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create directory or file
-
-        :param resource_type: Required for Create File and Create Directory.
-         The value must be "file" or "directory". Possible values include:
-         'directory', 'file'
-        :type resource_type: str
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the file/directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Dict[str, Union[str, datetime]]
-        """
-        options = self._create_path_options(
-            resource_type,
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return self._client.path.create(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @staticmethod
-    def _delete_path_options(**kwargs):
-        # type: (**Any) -> Dict[str, Any]
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-
-        options = {
-            'recursive': True,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None)}
-        options.update(kwargs)
-        return options
-
-    def _delete(self, **kwargs):
-        # type: (**Any) -> None
-        """
-        Marks the specified path for deletion.
-
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        options = self._delete_path_options(**kwargs)
-        try:
-            return self._client.path.delete(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @staticmethod
-    def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
-        # type: (Optional[str], Optional[str], Optional[str], Optional[str], **Any) -> Dict[str, Any]
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-
-        options = {
-            'owner': owner,
-            'group': group,
-            'permissions': permissions,
-            'acl': acl,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def set_access_control(self, owner=None,  # type: Optional[str]
-                           group=None,  # type: Optional[str]
-                           permissions=None,  # type: Optional[str]
-                           acl=None,  # type: Optional[str]
-                           **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Set the owner, group, permissions, or access control list for a path.
-
-        :param owner: Optional. The owner of the file or directory.
-        :type owner: str
-        :param group: Optional. The owning group of the file or directory.
-        :type group: str
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-         permissions and acl are mutually exclusive.
-        :type permissions: str
-        :param acl: Sets POSIX access control rights on files and directories.
-         The value is a comma-separated list of access control entries. Each
-         access control entry (ACE) consists of a scope, a type, a user or
-         group identifier, and permissions in the format
-         "[scope:][type]:[id]:[permissions]".
-         permissions and acl are mutually exclusive.
-        :type acl: str
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: response dict (Etag and last modified).
-        """
-        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
-        try:
-            return self._client.path.set_access_control(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @staticmethod
-    def _get_access_control_options(upn=None,  # type: Optional[bool]
-                                    **kwargs):
-        # type: (...) -> Dict[str, Any]
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-
-        options = {
-            'action': 'getAccessControl',
-            'upn': upn if upn else False,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def get_access_control(self, upn=None,  # type: Optional[bool]
-                           **kwargs):
-        # type: (...) -> Dict[str, Any]
-        """
-        :param upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: response dict.
-        """
-        options = self._get_access_control_options(upn=upn, **kwargs)
-        try:
-            return self._client.path.get_properties(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs):
-        # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
-        mod_conditions = get_mod_conditions(kwargs)
-        source_mod_conditions = get_source_mod_conditions(kwargs)
-
-        path_http_headers = None
-        if content_settings:
-            path_http_headers = get_path_http_headers(content_settings)
-
-        options = {
-            'rename_source': rename_source,
-            'properties': add_metadata_headers(metadata),
-            'permissions': kwargs.pop('permissions', None),
-            'umask': kwargs.pop('umask', None),
-            'path_http_headers': path_http_headers,
-            'lease_access_conditions': access_conditions,
-            'source_lease_id': source_lease_id,
-            'modified_access_conditions': mod_conditions,
-            'source_modified_access_conditions': source_mod_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'mode': 'legacy',
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    def _rename_path(self, rename_source,
-                     **kwargs):
-        # type: (**Any) -> Dict[str, Any]
-        """
-        Rename directory or file
-
-        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
-        :type rename_source: str
-        :param source_lease: A lease ID for the source path. If specified,
-         the source path must have an active lease and the lease ID must
-         match.
-        :type source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :param permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :type permissions: str
-        :param ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :param ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict (Etag and last modified).
-        """
-        options = self._rename_path_options(
-            rename_source,
-            **kwargs)
-        try:
-            return self._client.path.create(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _get_path_properties(self, **kwargs):
-        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file or directory. It does not return the content of the directory or file.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: DirectoryProperties or FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        path_properties = self._blob_client.get_blob_properties(**kwargs)
-        path_properties.__class__ = DirectoryProperties
-        return path_properties
-
-    def set_metadata(self, metadata=None,  # type: Optional[Dict[str, str]]
-                     **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        file system. Each call to this operation replaces all existing metadata
-        attached to the file system. To remove all metadata from the file system,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the path as
-            metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, set_metadata only succeeds if the
-            file/directory's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: file/directory-updated property dict (Etag and last modified).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START set_file_system_metadata]
-                :end-before: [END set_file_system_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Setting metadata on the container.
-        """
-        return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
-
-    def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
-                         **kwargs):
-        # type: (...) -> Dict[str, Any]
-        """Sets system properties on the file or directory.
-
-        If one property is set for the content_settings, all properties will be overridden.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set file/directory properties.
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, set_http_headers only succeeds if the
-            file/directory's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: file/directory-updated property dict (Etag and last modified)
-        :rtype: Dict[str, Any]
-        """
-        return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
-
-    def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
-                      lease_id=None,  # type: Optional[str]
-                      **kwargs):
-        # type: (...) -> DataLakeLeaseClient
-        """
-        Requests a new lease. If the file or directory does not have an active lease,
-        the DataLake service creates a lease on the file/directory and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The DataLake service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A DataLakeLeaseClient object that can be used in a context manager.
-        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START acquire_lease_on_file_system]
-                :end-before: [END acquire_lease_on_file_system]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on the file_system.
-        """
-        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
-        lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
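
For context, a hypothetical usage sketch of the PathClient removed above; the
account URL, file system, path, and credential are placeholders, and the import
path assumes the pre-1.5.0 package layout:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._path_client import PathClient

    client = PathClient(
        "https://myaccount.dfs.core.windows.net",  # converted internally to the blob endpoint
        file_system_name="myfs",
        path_name="mydir/myfile.txt",
        credential="<account-key-or-sas-token>",
    )

    # permissions and acl are mutually exclusive; permissions may be symbolic
    # (e.g. "rwxr-x---") or 4-digit octal (e.g. "0750").
    client.set_access_control(permissions="rwxr-x---")

    # For path creation the umask docstring above applies: with permission p
    # and umask u the effective mode is p & ~u, e.g. 0777 & ~0057 == 0720.

    # lease_duration=-1 requests an infinite lease; the returned
    # DataLakeLeaseClient can also be used as a context manager.
    lease = client.acquire_lease(lease_duration=-1)
    lease.release()
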
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,81 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.storage.blob._serialize import _get_match_headers  # pylint: disable=protected-access
-from ._shared import encode_base64
-from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \
-    SourceModifiedAccessConditions, LeaseAccessConditions
-
-
-def convert_dfs_url_to_blob_url(dfs_account_url):
-    return dfs_account_url.replace('dfs.core.windows.net', 'blob.core.windows.net', 1)
-
-
-def add_metadata_headers(metadata=None):
-    # type: (Optional[Dict[str, str]]) -> str
-    headers = list()
-    if metadata:
-        for key, value in metadata.items():
-            headers.append(key + '=')
-            headers.append(encode_base64(value))
-            headers.append(',')
-
-    if headers:
-        del headers[-1]
-
-    return ''.join(headers)
-
-
-def get_mod_conditions(kwargs):
-    # type: (Dict[str, Any]) -> ModifiedAccessConditions
-    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
-    return ModifiedAccessConditions(
-        if_modified_since=kwargs.pop('if_modified_since', None),
-        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
-        if_match=if_match or kwargs.pop('if_match', None),
-        if_none_match=if_none_match or kwargs.pop('if_none_match', None)
-    )
-
-
-def get_source_mod_conditions(kwargs):
-    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
-    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
-    return SourceModifiedAccessConditions(
-        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
-        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
-        source_if_match=if_match or kwargs.pop('source_if_match', None),
-        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
-    )
-
-
-def get_path_http_headers(content_settings):
-    path_headers = PathHTTPHeaders(
-        cache_control=content_settings.cache_control,
-        content_type=content_settings.content_type,
-        content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-        content_encoding=content_settings.content_encoding,
-        content_language=content_settings.content_language,
-        content_disposition=content_settings.content_disposition
-    )
-    return path_headers
-
-
-def get_access_conditions(lease):
-    # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
-    try:
-        lease_id = lease.id # type: ignore
-    except AttributeError:
-        lease_id = lease  # type: ignore
-    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
-
-
-def get_lease_id(lease):
-    if not lease:
-        return ""
-    try:
-        lease_id = lease.id
-    except AttributeError:
-        lease_id = lease
-    return lease_id
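As a minimal standalone sketch of the encoding performed by `add_metadata_headers` above (the helper name and sample values are local to this sketch), using only the standard library::

    import base64

    def encode_properties(metadata):
        # Each value is base64-encoded; pairs are joined as "key=value,key=value".
        return ','.join(
            '{}={}'.format(key, base64.b64encode(value.encode('utf-8')).decode('utf-8'))
            for key, value in (metadata or {}).items()
        )

    assert encode_properties({'hello': 'world'}) == 'hello=d29ybGQ='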
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,56 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-
-try:
-    from urllib.parse import quote, unquote
-except ImportError:
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-
-def url_quote(url):
-    return quote(url)
-
-
-def url_unquote(url):
-    return unquote(url)
-
-
-def encode_base64(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def decode_base64_to_bytes(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def decode_base64_to_text(data):
-    decoded_bytes = decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, six.text_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, six.text_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = encode_base64(digest)
-    return encoded_digest
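The signing helper above is plain HMAC-SHA256 over UTF-8 bytes, with base64 applied to both the key and the digest; the following stdlib-only round trip (made-up key and payload) shows the value `sign_string` would produce::

    import base64
    import hashlib
    import hmac

    key_b64 = base64.b64encode(b'secret-key').decode('utf-8')  # made-up key
    string_to_sign = 'GET\n\n\n'                               # made-up payload

    digest = hmac.new(base64.b64decode(key_b64),
                      string_to_sign.encode('utf-8'),
                      hashlib.sha256).digest()
    signature = base64.b64encode(digest).decode('utf-8')
    # sign_string(key_b64, string_to_sign) above produces the same value.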
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-import sys
-
-try:
-    from urllib.parse import urlparse, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import unquote # type: ignore
-
-try:
-    from yarl import URL
-except ImportError:
-    pass
-
-try:
-    from azure.core.pipeline.transport import AioHttpTransport
-except ImportError:
-    AioHttpTransport = None
-
-from azure.core.exceptions import ClientAuthenticationError
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-
-from . import sign_string
-
-
-logger = logging.getLogger(__name__)
-
-
-
-# wraps a given exception with the desired exception type
-def _wrap_exception(ex, desired_type):
-    msg = ""
-    if ex.args:
-        msg = ex.args[0]
-    if sys.version_info >= (3,):
-        # Automatic chaining in Python 3 means we keep the trace
-        return desired_type(msg)
-    # There isn't a good general solution in Python 2 for keeping the stack
-    # trace, at least not one that will not result in an error in Python 3.
-    # However, we can keep the previous error type and message
-    # TODO: In the future we will log the trace
-    return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
-
-
-class AzureSigningError(ClientAuthenticationError):
-    """
-    Represents a fatal error when attempting to sign a request.
-    In general, the cause of this exception is user error. For example, the given account key is not valid.
-    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
-    """
-
-
-# pylint: disable=no-self-use
-class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
-
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-        super(SharedKeyCredentialPolicy, self).__init__()
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.http_request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = urlparse(request.http_request.url).path
-        try:
-            if isinstance(request.context.transport, AioHttpTransport) or \
-                isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
-                uri_path = URL(uri_path)
-                return '/' + self.account_name + str(uri_path)
-        except TypeError:
-            pass
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.http_request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.http_request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
-
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        try:
-            signature = sign_string(self.account_key, string_to_sign)
-            auth_string = 'SharedKey ' + self.account_name + ':' + signature
-            request.http_request.headers['Authorization'] = auth_string
-        except Exception as ex:
-            # Wrap any error that occurred as signing error
-            # Doing so will clarify/locate the source of problem
-            raise _wrap_exception(ex, AzureSigningError)
-
-    def on_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        #logger.debug("String_to_sign=%s", string_to_sign)
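Pieced together, the Shared Key string-to-sign assembled by `on_request` has the shape sketched below; all request parts are hypothetical, chosen only to show the ordering::

    account = 'myaccount'                  # hypothetical account
    verb = 'GET\n'
    standard_headers = '\n' * 11           # the eleven standard headers, here all empty
    x_ms_headers = ('x-ms-date:Mon, 01 Jan 2024 00:00:00 GMT\n'
                    'x-ms-version:2019-07-07\n')
    canonical_resource = '/' + account + '/myfilesystem/dir/file.txt'
    canonical_query = ''                   # '\nname:value' per sorted query parameter
    string_to_sign = (verb + standard_headers + x_ms_headers +
                      canonical_resource + canonical_query)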
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,427 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union,
-    Optional,
-    Any,
-    Iterable,
-    Dict,
-    List,
-    Type,
-    Tuple,
-    TYPE_CHECKING,
-)
-import logging
-
-try:
-    from urllib.parse import parse_qs, quote
-except ImportError:
-    from urlparse import parse_qs  # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-
-from azure.core.configuration import Configuration
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline import Pipeline
-from azure.core.pipeline.transport import RequestsTransport, HttpTransport
-from azure.core.pipeline.policies import (
-    RedirectPolicy,
-    ContentDecodePolicy,
-    BearerTokenCredentialPolicy,
-    ProxyPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-
-from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .models import LocationMode
-from .authentication import SharedKeyCredentialPolicy
-from .shared_access_signature import QueryStringConstants
-from .policies import (
-    StorageHeadersPolicy,
-    StorageUserAgentPolicy,
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageResponseHook,
-    StorageLoggingPolicy,
-    StorageHosts,
-    QueueMessagePolicy,
-    ExponentialRetry,
-)
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-
-_LOGGER = logging.getLogger(__name__)
-_SERVICE_PARAMS = {
-    "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-    "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"},
-    "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"},
-    "dfs": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-}
-
-
-class StorageAccountHostsMixin(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        parsed_url,  # type: Any
-        service,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
-        self._hosts = kwargs.get("_hosts")
-        self.scheme = parsed_url.scheme
-
-        if service not in ["blob", "queue", "file-share", "dfs"]:
-            raise ValueError("Invalid service: {}".format(service))
-        service_name = service.split('-')[0]
-        account = parsed_url.netloc.split(".{}.core.".format(service_name))
-        self.account_name = account[0] if len(account) > 1 else None
-        secondary_hostname = None
-
-        self.credential = format_shared_key_credential(account, credential)
-        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
-            raise ValueError("Token credential is only supported with HTTPS.")
-        if hasattr(self.credential, "account_name"):
-            self.account_name = self.credential.account_name
-            secondary_hostname = "{}-secondary.{}.{}".format(
-                self.credential.account_name, service_name, SERVICE_HOST_BASE)
-
-        if not self._hosts:
-            if len(account) > 1:
-                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
-            if kwargs.get("secondary_hostname"):
-                secondary_hostname = kwargs["secondary_hostname"]
-            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
-            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
-
-        self.require_encryption = kwargs.get("require_encryption", False)
-        self.key_encryption_key = kwargs.get("key_encryption_key")
-        self.key_resolver_function = kwargs.get("key_resolver_function")
-        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
-
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-
-    def __exit__(self, *args):
-        self._client.__exit__(*args)
-
-    def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        self._client.close()
-
-    @property
-    def url(self):
-        """The full endpoint URL to this entity, including SAS token if used.
-
-        This could be either the primary endpoint,
-        or the secondary endpoint depending on the current :func:`location_mode`.
-        """
-        return self._format_url(self._hosts[self._location_mode])
-
-    @property
-    def primary_endpoint(self):
-        """The full primary endpoint URL.
-
-        :type: str
-        """
-        return self._format_url(self._hosts[LocationMode.PRIMARY])
-
-    @property
-    def primary_hostname(self):
-        """The hostname of the primary endpoint.
-
-        :type: str
-        """
-        return self._hosts[LocationMode.PRIMARY]
-
-    @property
-    def secondary_endpoint(self):
-        """The full secondary endpoint URL if configured.
-
-        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str
-        :raise ValueError:
-        """
-        if not self._hosts[LocationMode.SECONDARY]:
-            raise ValueError("No secondary host configured.")
-        return self._format_url(self._hosts[LocationMode.SECONDARY])
-
-    @property
-    def secondary_hostname(self):
-        """The hostname of the secondary endpoint.
-
-        If not available, this will be None. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str or None
-        """
-        return self._hosts[LocationMode.SECONDARY]
-
-    @property
-    def location_mode(self):
-        """The location mode that the client is currently using.
-
-        By default this will be "primary". Options include "primary" and "secondary".
-
-        :type: str
-        """
-
-        return self._location_mode
-
-    @location_mode.setter
-    def location_mode(self, value):
-        if self._hosts.get(value):
-            self._location_mode = value
-            self._client._config.url = self.url  # pylint: disable=protected-access
-        else:
-            raise ValueError("No host URL for location mode: {}".format(value))
-
-    @property
-    def api_version(self):
-        """The version of the Storage API used for requests.
-
-        :type: str
-        """
-        return self._client._config.version  # pylint: disable=protected-access
-
-    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
-        query_str = "?"
-        if snapshot:
-            query_str += "snapshot={}&".format(self.snapshot)
-        if share_snapshot:
-            query_str += "sharesnapshot={}&".format(self.snapshot)
-        if sas_token and not credential:
-            query_str += sas_token
-        elif is_credential_sastoken(credential):
-            query_str += credential.lstrip("?")
-            credential = None
-        return query_str.rstrip("?&"), credential
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, "get_token"):
-            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-
-        config = kwargs.get("_configuration") or create_configuration(**kwargs)
-        if kwargs.get("_pipeline"):
-            return config, kwargs["_pipeline"]
-        config.transport = kwargs.get("transport")  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            config.transport = RequestsTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            RedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs),
-            config.retry_policy,
-            config.logging_policy,
-            StorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs)
-        ]
-        return config, Pipeline(config.transport, policies=policies)
-
-    def _batch_send(
-        self, *reqs,  # type: HttpRequest
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts()
-            if raise_on_any_failure:
-                parts = list(response.parts())
-                if any(p for p in parts if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts
-                    )
-                    raise error
-                return iter(parts)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-class TransportWrapper(HttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, transport):
-        self._transport = transport
-
-    def send(self, request, **kwargs):
-        return self._transport.send(request, **kwargs)
-
-    def open(self):
-        pass
-
-    def close(self):
-        pass
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, *args):  # pylint: disable=arguments-differ
-        pass
-
-
-def format_shared_key_credential(account, credential):
-    if isinstance(credential, six.string_types):
-        if len(account) < 2:
-            raise ValueError("Unable to determine account name for shared key credential.")
-        credential = {"account_name": account[0], "account_key": credential}
-    if isinstance(credential, dict):
-        if "account_name" not in credential:
-            raise ValueError("Shared key credential missing 'account_name")
-        if "account_key" not in credential:
-            raise ValueError("Shared key credential missing 'account_key")
-        return SharedKeyCredentialPolicy(**credential)
-    return credential
-
-
-def parse_connection_str(conn_str, credential, service):
-    conn_str = conn_str.rstrip(";")
-    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
-    if any(len(tup) != 2 for tup in conn_settings):
-        raise ValueError("Connection string is either blank or malformed.")
-    conn_settings = dict(conn_settings)
-    endpoints = _SERVICE_PARAMS[service]
-    primary = None
-    secondary = None
-    if not credential:
-        try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
-        except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
-    if endpoints["primary"] in conn_settings:
-        primary = conn_settings[endpoints["primary"]]
-        if endpoints["secondary"] in conn_settings:
-            secondary = conn_settings[endpoints["secondary"]]
-    else:
-        if endpoints["secondary"] in conn_settings:
-            raise ValueError("Connection string specifies only secondary endpoint.")
-        try:
-            primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
-                service,
-                conn_settings["EndpointSuffix"],
-            )
-            secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
-            )
-        except KeyError:
-            pass
-
-    if not primary:
-        try:
-            primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
-            )
-        except KeyError:
-            raise ValueError("Connection string missing required connection details.")
-    return primary, secondary, credential
-
-
-def create_configuration(**kwargs):
-    # type: (**Any) -> Configuration
-    config = Configuration(**kwargs)
-    config.headers_policy = StorageHeadersPolicy(**kwargs)
-    config.user_agent_policy = StorageUserAgentPolicy(**kwargs)
-    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-    config.logging_policy = StorageLoggingPolicy(**kwargs)
-    config.proxy_policy = ProxyPolicy(**kwargs)
-
-    # Storage settings
-    config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
-    config.copy_polling_interval = 15
-
-    # Block blob uploads
-    config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
-    config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
-    config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
-
-    # Page blob uploads
-    config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
-
-    # Blob downloads
-    config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
-    config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
-
-    # File uploads
-    config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
-    return config
-
-
-def parse_query(query_str):
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
-    sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
-    sas_token = None
-    if sas_params:
-        sas_token = "&".join(sas_params)
-
-    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
-    return snapshot, sas_token
-
-
-def is_credential_sastoken(credential):
-    if not credential or not isinstance(credential, six.string_types):
-        return False
-
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = parse_qs(credential.lstrip("?"))
-    if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
-        return True
-    return False
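The connection-string handling in `parse_connection_str` above reduces to splitting on ';' and then once on '='; a compact sketch with a made-up connection string (the maxsplit of 1 is what keeps the '==' padding of the account key intact)::

    conn_str = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
                'AccountKey=abc123==;EndpointSuffix=core.windows.net')  # made-up
    settings = dict(s.split('=', 1) for s in conn_str.rstrip(';').split(';'))

    primary = '{}://{}.dfs.{}'.format(
        settings['DefaultEndpointsProtocol'],
        settings['AccountName'],
        settings['EndpointSuffix'],
    )
    assert primary == 'https://myaccount.dfs.core.windows.net'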
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,176 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-from azure.core.pipeline import AsyncPipeline
-from azure.core.async_paging import AsyncList
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline.policies import (
-    ContentDecodePolicy,
-    AsyncBearerTokenCredentialPolicy,
-    AsyncRedirectPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-from azure.core.pipeline.transport import AsyncHttpTransport
-
-from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .authentication import SharedKeyCredentialPolicy
-from .base_client import create_configuration
-from .policies import (
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageHosts,
-    StorageHeadersPolicy,
-    QueueMessagePolicy
-)
-from .policies_async import AsyncStorageResponseHook
-
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import Pipeline
-    from azure.core.pipeline.transport import HttpRequest
-    from azure.core.configuration import Configuration
-_LOGGER = logging.getLogger(__name__)
-
-
-class AsyncStorageAccountHostsMixin(object):
-
-    def __enter__(self):
-        raise TypeError("Async client only supports 'async with'.")
-
-    def __exit__(self, *args):
-        pass
-
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-
-    async def __aexit__(self, *args):
-        await self._client.__aexit__(*args)
-
-    async def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        await self._client.close()
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, 'get_token'):
-            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-        config = kwargs.get('_configuration') or create_configuration(**kwargs)
-        if kwargs.get('_pipeline'):
-            return config, kwargs['_pipeline']
-        config.transport = kwargs.get('transport')  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            try:
-                from azure.core.pipeline.transport import AioHttpTransport
-            except ImportError:
-                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
-            config.transport = AioHttpTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            AsyncRedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
-            config.retry_policy,
-            config.logging_policy,
-            AsyncStorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs),
-        ]
-        return config, AsyncPipeline(config.transport, policies=policies)
-
-    async def _batch_send(
-        self, *reqs: 'HttpRequest',
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = await self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts() # Return an AsyncIterator
-            if raise_on_any_failure:
-                parts_list = []
-                async for part in parts:
-                    parts_list.append(part)
-                if any(p for p in parts_list if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts_list
-                    )
-                    raise error
-                return AsyncList(parts_list)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-
-class AsyncTransportWrapper(AsyncHttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, async_transport):
-        self._transport = async_transport
-
-    async def send(self, request, **kwargs):
-        return await self._transport.send(request, **kwargs)
-
-    async def open(self):
-        pass
-
-    async def close(self):
-        pass
-
-    async def __aenter__(self):
-        pass
-
-    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
-        pass
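The wrapper above exists so a sub-client can share its parent's transport without ever closing it; the following self-contained sketch (all names are local to the sketch) illustrates the same no-op-close delegation pattern::

    import asyncio

    class SharedResource:
        async def close(self):
            print('really closed')

    class NonClosingView:
        """Delegates to the inner resource but never closes it."""
        def __init__(self, inner):
            self._inner = inner

        async def close(self):
            pass  # the owner, not the view, closes the real resource

    async def main():
        resource = SharedResource()
        view = NonClosingView(resource)
        await view.close()       # no-op: the shared resource stays open
        await resource.close()   # the owner closes for real

    asyncio.run(main())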
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-from .._generated.version import VERSION
-
-
-X_MS_VERSION = VERSION
-
-# Socket timeout in seconds
-CONNECTION_TIMEOUT = 20
-READ_TIMEOUT = 20
-
-# For Python 3.5+, the definition of the socket timeout changed (as far as socket.sendall is concerned):
-# the socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # The connect timeout is 20 seconds and the read timeout is 2000 seconds;
-    # 2000 seconds was calculated as 100 MB (max block size) / 50 KB/s (an arbitrarily chosen minimum upload speed).
-    READ_TIMEOUT = 2000
-
-STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
-
-SERVICE_HOST_BASE = 'core.windows.net'
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,542 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from os import urandom
-from json import (
-    dumps,
-    loads,
-)
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from azure.core.exceptions import HttpResponseError
-
-from .._version import VERSION
-from . import encode_base64, decode_base64_to_bytes
-
-
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError('{0} should not be None.'.format(param_name))
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier,
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError("Unsupported encryption version.")
-    except KeyError:
-        raise ValueError("Unsupported encryption version.")
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function used that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
-        raise ValueError('Encryption version is not supported.')
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
-        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
-
-
-def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
-    Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
-
-
-def encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the upload_data_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256-bit (32-byte) keys and always 16-byte blocks.
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
-
-def generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-
-    :param bytes key_encryption_key:
-        The key-encryption-key used to wrap the cek associate with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                 content, start_offset, end_offset, response_headers):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
-    except:  # pylint: disable=bare-except
-        if require_encryption:
-            raise ValueError(
-                'Encryption required, but received data does not contain appropriate metadata. ' + \
-                'Data was either not encrypted or metadata has been lost.')
-
-        return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    blob_type = response_headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    if 'content-range' in response_headers:
-        content_range = response_headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
-
-
-def encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256-bit (32-byte) keys and always 16-byte blocks.
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted and so was not encrypted
-        # or the user provided a json formatted message.
-        if require_encryption:
-            raise ValueError('Message was not encrypted.')
-
-        return message
-    try:
-        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception as error:
-        raise HttpResponseError(
-            message="Decryption failed.",
-            response=response,
-            error=error)
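
Note on the queue encryption helpers deleted above: they delegate key wrapping to a caller-supplied key-encryption-key (KEK) object rather than managing keys themselves. A minimal sketch of the interface they expect, with a hypothetical class name and a deliberately insecure no-op wrap (a real KEK would wrap the content-encryption key via Key Vault or AES key wrap):

    # Hypothetical KEK satisfying the contract documented in
    # encrypt_queue_message/decrypt_queue_message above.
    class DemoKeyEncryptionKey:
        def __init__(self, kid='local:demo-kek'):
            self._kid = kid

        def wrap_key(self, key):
            # Insecure placeholder: returns the CEK unchanged. A real
            # implementation encrypts it under the KEK (e.g. RSA-OAEP).
            return key

        def unwrap_key(self, wrapped_key, algorithm):
            # Inverse of wrap_key; 'algorithm' echoes get_key_wrap_algorithm().
            return wrapped_key

        def get_key_wrap_algorithm(self):
            return 'none'  # placeholder identifier

        def get_kid(self):
            return self._kid
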
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,447 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-def get_enum_value(value):
-    if value is None or value in ["None", ""]:
-        return None
-    try:
-        return value.value
-    except AttributeError:
-        return value
-
-
-class StorageErrorCode(str, Enum):
-
-    # Generic storage values
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-
-    # Blob values
-    append_position_condition_not_met = "AppendPositionConditionNotMet"
-    blob_already_exists = "BlobAlreadyExists"
-    blob_not_found = "BlobNotFound"
-    blob_overwritten = "BlobOverwritten"
-    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
-    block_count_exceeds_limit = "BlockCountExceedsLimit"
-    block_list_too_long = "BlockListTooLong"
-    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
-    cannot_verify_copy_source = "CannotVerifyCopySource"
-    container_already_exists = "ContainerAlreadyExists"
-    container_being_deleted = "ContainerBeingDeleted"
-    container_disabled = "ContainerDisabled"
-    container_not_found = "ContainerNotFound"
-    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
-    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
-    copy_id_mismatch = "CopyIdMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
-    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
-    incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
-    infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
-    invalid_blob_or_block = "InvalidBlobOrBlock"
-    invalid_blob_tier = "InvalidBlobTier"
-    invalid_blob_type = "InvalidBlobType"
-    invalid_block_id = "InvalidBlockId"
-    invalid_block_list = "InvalidBlockList"
-    invalid_operation = "InvalidOperation"
-    invalid_page_range = "InvalidPageRange"
-    invalid_source_blob_type = "InvalidSourceBlobType"
-    invalid_source_blob_url = "InvalidSourceBlobUrl"
-    invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
-    lease_already_present = "LeaseAlreadyPresent"
-    lease_already_broken = "LeaseAlreadyBroken"
-    lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
-    lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
-    lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
-    lease_id_missing = "LeaseIdMissing"
-    lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
-    lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
-    lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
-    lease_lost = "LeaseLost"
-    lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
-    lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
-    lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
-    max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
-    no_pending_copy_operation = "NoPendingCopyOperation"
-    operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
-    pending_copy_operation = "PendingCopyOperation"
-    previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
-    previous_snapshot_not_found = "PreviousSnapshotNotFound"
-    previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
-    sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
-    sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
-    snapshot_count_exceeded = "SnapshotCountExceeded"
-    snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
-    snapshots_present = "SnapshotsPresent"
-    source_condition_not_met = "SourceConditionNotMet"
-    system_in_use = "SystemInUse"
-    target_condition_not_met = "TargetConditionNotMet"
-    unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
-    blob_being_rehydrated = "BlobBeingRehydrated"
-    blob_archived = "BlobArchived"
-    blob_not_archived = "BlobNotArchived"
-
-    # Queue values
-    invalid_marker = "InvalidMarker"
-    message_not_found = "MessageNotFound"
-    message_too_large = "MessageTooLarge"
-    pop_receipt_mismatch = "PopReceiptMismatch"
-    queue_already_exists = "QueueAlreadyExists"
-    queue_being_deleted = "QueueBeingDeleted"
-    queue_disabled = "QueueDisabled"
-    queue_not_empty = "QueueNotEmpty"
-    queue_not_found = "QueueNotFound"
-
-    # File values
-    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
-    client_cache_flush_delay = "ClientCacheFlushDelay"
-    delete_pending = "DeletePending"
-    directory_not_empty = "DirectoryNotEmpty"
-    file_lock_conflict = "FileLockConflict"
-    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
-    parent_not_found = "ParentNotFound"
-    read_only_attribute = "ReadOnlyAttribute"
-    share_already_exists = "ShareAlreadyExists"
-    share_being_deleted = "ShareBeingDeleted"
-    share_disabled = "ShareDisabled"
-    share_not_found = "ShareNotFound"
-    sharing_violation = "SharingViolation"
-    share_snapshot_in_progress = "ShareSnapshotInProgress"
-    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
-    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
-    share_has_snapshots = "ShareHasSnapshots"
-    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
-
-    # DataLake values
-    content_length_must_be_zero = 'ContentLengthMustBeZero'
-    path_already_exists = 'PathAlreadyExists'
-    invalid_flush_position = 'InvalidFlushPosition'
-    invalid_property_name = 'InvalidPropertyName'
-    invalid_source_uri = 'InvalidSourceUri'
-    unsupported_rest_version = 'UnsupportedRestVersion'
-    file_system_not_found = 'FilesystemNotFound'
-    path_not_found = 'PathNotFound'
-    rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound'
-    source_path_not_found = 'SourcePathNotFound'
-    destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted'
-    file_system_already_exists = 'FilesystemAlreadyExists'
-    file_system_being_deleted = 'FilesystemBeingDeleted'
-    invalid_destination_path = 'InvalidDestinationPath'
-    invalid_rename_source_path = 'InvalidRenameSourcePath'
-    invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType'
-    lease_is_already_broken = 'LeaseIsAlreadyBroken'
-    lease_name_mismatch = 'LeaseNameMismatch'
-    path_conflict = 'PathConflict'
-    source_path_is_being_deleted = 'SourcePathIsBeingDeleted'
-
-
-class DictMixin(object):
-
-    def __setitem__(self, key, item):
-        self.__dict__[key] = item
-
-    def __getitem__(self, key):
-        return self.__dict__[key]
-
-    def __repr__(self):
-        return str(self)
-
-    def __len__(self):
-        return len(self.keys())
-
-    def __delitem__(self, key):
-        self.__dict__[key] = None
-
-    def __eq__(self, other):
-        """Compare objects by comparing all attributes."""
-        if isinstance(other, self.__class__):
-            return self.__dict__ == other.__dict__
-        return False
-
-    def __ne__(self, other):
-        """Compare objects by comparing all attributes."""
-        return not self.__eq__(other)
-
-    def __str__(self):
-        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
-
-    def has_key(self, k):
-        return k in self.__dict__
-
-    def update(self, *args, **kwargs):
-        return self.__dict__.update(*args, **kwargs)
-
-    def keys(self):
-        return [k for k in self.__dict__ if not k.startswith('_')]
-
-    def values(self):
-        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def items(self):
-        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def get(self, key, default=None):
-        if key in self.__dict__:
-            return self.__dict__[key]
-        return default
-
-
-class LocationMode(object):
-    """
-    Specifies the location the request should be sent to. This mode only applies
-    for RA-GRS accounts which allow secondary read access. All other account types
-    must use PRIMARY.
-    """
-
-    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
-    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
-
-
-class ResourceTypes(object):
-    """
-    Specifies the resource types that are accessible with the account SAS.
-
-    :param bool service:
-        Access to service-level APIs (e.g., Get/Set Service Properties,
-        Get Service Stats, List Containers/Queues/Shares)
-    :param bool container:
-        Access to container-level APIs (e.g., Create/Delete Container,
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories)
-    :param bool object:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    """
-
-    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
-        self.service = service
-        self.container = container
-        self.object = object
-        self._str = (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create a ResourceTypes from a string.
-
-        To specify service, container, or object you need only to
-        include the first letter of the word in the string. E.g. for service and container,
-        you would provide a string "sc".
-
-        :param str string: Specify service, container, or object
-            in the string with the first letter of the word.
-        :return: A ResourceTypes object
-        :rtype: ~azure.storage.blob.ResourceTypes
-        """
-        res_service = 's' in string
-        res_container = 'c' in string
-        res_object = 'o' in string
-
-        parsed = cls(res_service, res_container, res_object)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class AccountSasPermissions(object):
-    """
-    :class:`~AccountSasPermissions` is used with the generate_account_sas
-    function and for the AccessPolicies used with set_*_acl. There are two types of
-    SAS which may be used to grant resource access. One is to grant access to a
-    specific resource (resource-specific). Another is to grant access to the
-    entire service for a specific account and allow certain operations based on
-    the permissions found here.
-
-    :param bool read:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits read permissions to the specified resource type.
-    :param bool write:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits write permissions to the specified resource type.
-    :param bool delete:
-        Valid for Container and Object resource types, except for queue messages.
-    :param bool list:
-        Valid for Service and Container resource types only.
-    :param bool add:
-        Valid for the following Object resource types only: queue messages, and append blobs.
-    :param bool create:
-        Valid for the following Object resource types only: blobs and files.
-        Users can create new blobs or files, but may not overwrite existing
-        blobs or files.
-    :param bool update:
-        Valid for the following Object resource types only: queue messages.
-    :param bool process:
-        Valid for the following Object resource type only: queue messages.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
-                 add=False, create=False, update=False, process=False):
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self.add = add
-        self.create = create
-        self.update = update
-        self.process = process
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else '') +
-                     ('a' if self.add else '') +
-                     ('c' if self.create else '') +
-                     ('u' if self.update else '') +
-                     ('p' if self.process else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create AccountSasPermissions from a string.
-
-        To specify read, write, delete, etc. permissions you need only to
-        include the first letter of the word in the string. E.g. for read and write
-        permissions you would provide a string "rw".
-
-        :param str permission: Specify permissions in
-            the string with the first letter of the word.
-        :return: An AccountSasPermissions object
-        :rtype: ~azure.storage.blob.AccountSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-        p_add = 'a' in permission
-        p_create = 'c' in permission
-        p_update = 'u' in permission
-        p_process = 'p' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-class Services(object):
-    """Specifies the services accessible with the account SAS.
-
-    :param bool blob:
-        Access for the `~azure.storage.blob.BlobServiceClient`
-    :param bool queue:
-        Access for the `~azure.storage.queue.QueueServiceClient`
-    :param bool fileshare:
-        Access for the `~azure.storage.fileshare.ShareServiceClient`
-    """
-
-    def __init__(self, blob=False, queue=False, fileshare=False):
-        self.blob = blob
-        self.queue = queue
-        self.fileshare = fileshare
-        self._str = (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('f' if self.fileshare else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create Services from a string.
-
-        To specify blob, queue, or file you need only to
-        include the first letter of the word in the string. E.g. for blob and queue
-        you would provide a string "bq".
-
-        :param str string: Specify blob, queue, or file
-            in the string with the first letter of the word.
-        :return: A Services object
-        :rtype: ~azure.storage.blob.Services
-        """
-        res_blob = 'b' in string
-        res_queue = 'q' in string
-        res_file = 'f' in string
-
-        parsed = cls(res_blob, res_queue, res_file)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
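
The SAS helper classes removed above (ResourceTypes, AccountSasPermissions, Services) all implement the same flag/string round trip: flags serialize in a fixed order, and from_string tests letter membership. A short sketch, assuming the 1.4.0 package is still installed (this module path no longer exists in 1.5.0):

    from azure.multiapi.storagev2.filedatalake.v2019_07_07._shared.models import (
        AccountSasPermissions,
        ResourceTypes,
    )

    # Flags serialize in the fixed order r, w, d, l, a, c, u, p.
    perms = AccountSasPermissions(read=True, write=True, list=True)
    assert str(perms) == 'rwl'

    # from_string simply tests membership of each letter.
    parsed = AccountSasPermissions.from_string('rwl')
    assert parsed.read and parsed.write and parsed.list and not parsed.delete

    # Same pattern for resource types: service + container, no object.
    assert str(ResourceTypes.from_string('sc')) == 'sc'
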
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
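
One behavior of the deleted _to_utc_datetime worth noting: it only formats, it performs no time-zone conversion, so callers are expected to pass a datetime that is already UTC. A standalone equivalent for reference:

    from datetime import datetime, timezone

    def to_utc_datetime(value):
        # Same format string as the removed helper; strftime does not
        # convert zones, so 'value' must already be in UTC.
        return value.strftime('%Y-%m-%dT%H:%M:%SZ')

    assert to_utc_datetime(
        datetime(2025, 5, 5, 12, 0, 0, tzinfo=timezone.utc)
    ) == '2025-05-05T12:00:00Z'
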
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,638 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-import types
-import platform
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode # type: ignore
-    from urlparse import ( # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .._version import VERSION
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):
-    """Is this method/status code retryable? (Based on whitelists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon on the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
-        if status in [501, 505]:
-            return False
-        return True
-    return False
-
-
-def urljoin(base_url, stub_url):
-    parsed = urlparse(base_url)
-    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
-    return parsed.geturl()
-
-
-class QueueMessagePolicy(SansIOHTTPPolicy):
-
-    def on_request(self, request):
-        message_id = request.context.options.pop('queue_message_id', None)
-        if message_id:
-            request.http_request.url = urljoin(
-                request.http_request.url,
-                message_id)
-
-
-class StorageHeadersPolicy(HeadersPolicy):
-    request_id_header_name = 'x-ms-client-request-id'
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        super(StorageHeadersPolicy, self).on_request(request)
-        current_time = format_date_time(time())
-        request.http_request.headers['x-ms-date'] = current_time
-
-        custom_id = request.context.options.pop('client_request_id', None)
-        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
-
-    # def on_response(self, request, response):
-    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
-    #     if self.request_id_header_name in response.http_response.headers:
-
-    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
-
-    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
-    #             raise AzureError(
-    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
-    #                 "Service request ID: {}".format(
-    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
-    #                     response.http_response.headers['x-ms-request-id']),
-    #                 response=response.http_response
-    #             )
-
-
-class StorageHosts(SansIOHTTPPolicy):
-
-    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
-        self.hosts = hosts
-        super(StorageHosts, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        request.context.options['hosts'] = self.hosts
-        parsed_url = urlparse(request.http_request.url)
-
-        # Detect what location mode we're currently requesting with
-        location_mode = LocationMode.PRIMARY
-        for key, value in self.hosts.items():
-            if parsed_url.netloc == value:
-                location_mode = key
-
-        # See if a specific location mode has been specified, and if so, redirect
-        use_location = request.context.options.pop('use_location', None)
-        if use_location:
-            # Lock retries to the specific location
-            request.context.options['retry_to_secondary'] = False
-            if use_location not in self.hosts:
-                raise ValueError("Attempting to use undefined host location {}".format(use_location))
-            if use_location != location_mode:
-                # Update request URL to use the specified location
-                updated = parsed_url._replace(netloc=self.hosts[use_location])
-                request.http_request.url = updated.geturl()
-                location_mode = use_location
-
-        request.context.options['location_mode'] = location_mode
-
-
-class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
-    """A policy that logs HTTP request and response to the DEBUG logger.
-
-    This accepts both global configuration, and per-request level with "enable_http_logger"
-    """
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        http_request = request.http_request
-        options = request.context.options
-        if options.pop("logging_enable", self.enable_http_logger):
-            request.context["logging_enable"] = True
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                log_url = http_request.url
-                query_params = http_request.query
-                if 'sig' in query_params:
-                    log_url = log_url.replace(query_params['sig'], "sig=*****")
-                _LOGGER.debug("Request URL: %r", log_url)
-                _LOGGER.debug("Request method: %r", http_request.method)
-                _LOGGER.debug("Request headers:")
-                for header, value in http_request.headers.items():
-                    if header.lower() == 'authorization':
-                        value = '*****'
-                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
-                        # take the url apart and scrub away the signed signature
-                        scheme, netloc, path, params, query, fragment = urlparse(value)
-                        parsed_qs = dict(parse_qsl(query))
-                        parsed_qs['sig'] = '*****'
-
-                        # the SAS needs to be put back together
-                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
-
-                    _LOGGER.debug("    %r: %r", header, value)
-                _LOGGER.debug("Request body:")
-
-                # We don't want to log the binary data of a file upload.
-                if isinstance(http_request.body, types.GeneratorType):
-                    _LOGGER.debug("File upload")
-                else:
-                    _LOGGER.debug(str(http_request.body))
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log request: %r", err)
-
-    def on_response(self, request, response):
-        # type: (PipelineRequest, PipelineResponse, Any) -> None
-        if response.context.pop("logging_enable", self.enable_http_logger):
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                _LOGGER.debug("Response status: %r", response.http_response.status_code)
-                _LOGGER.debug("Response headers:")
-                for res_header, value in response.http_response.headers.items():
-                    _LOGGER.debug("    %r: %r", res_header, value)
-
-                # We don't want to log binary data if the response is a file.
-                _LOGGER.debug("Response content:")
-                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
-                header = response.http_response.headers.get('content-disposition')
-
-                if header and pattern.match(header):
-                    filename = header.partition('=')[2]
-                    _LOGGER.debug("File attachments: %s", filename)
-                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
-                    _LOGGER.debug("Body contains binary data.")
-                elif response.http_response.headers.get("content-type", "").startswith("image"):
-                    _LOGGER.debug("Body contains image data.")
-                else:
-                    if response.context.options.get('stream', False):
-                        _LOGGER.debug("Body is streamable")
-                    else:
-                        _LOGGER.debug(response.http_response.text())
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log response: %s", repr(err))
-
-
-class StorageUserAgentPolicy(SansIOHTTPPolicy):
-
-    _USERAGENT = "User-Agent"
-
-    def __init__(self, **kwargs):
-        self._application = kwargs.pop('user_agent', None)
-        storage_sdk = kwargs.pop('storage_sdk')
-        self._user_agent = "azsdk-python-storage-{}/{} Python/{} ({})".format(
-            storage_sdk,
-            VERSION,
-            platform.python_version(),
-            platform.platform())
-        super(StorageUserAgentPolicy, self).__init__()
-
-    def on_request(self, request):
-        existing = request.http_request.headers.get(self._USERAGENT, "")
-        app_string = request.context.options.pop('user_agent', None) or self._application
-        if app_string:
-            request.http_request.headers[self._USERAGENT] = "{} {}".format(
-                app_string, self._user_agent)
-        else:
-            request.http_request.headers[self._USERAGENT] = self._user_agent
-        if existing:
-            request.http_request.headers[self._USERAGENT] += " " + existing
-
-
-class StorageRequestHook(SansIOHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._request_callback = kwargs.get('raw_request_hook')
-        super(StorageRequestHook, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, **Any) -> PipelineResponse
-        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
-        if request_callback:
-            request_callback(request)
-
-
-class StorageResponseHook(HTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(StorageResponseHook, self).__init__()
-
-    def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = self.next.send(request)
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-
-class StorageContentValidation(SansIOHTTPPolicy):
-    """A simple policy that sends the given headers
-    with the request.
-
-    This will overwrite any headers already defined in the request.
-    """
-    header_name = 'Content-MD5'
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        super(StorageContentValidation, self).__init__()
-
-    @staticmethod
-    def get_content_md5(data):
-        md5 = hashlib.md5()
-        if isinstance(data, bytes):
-            md5.update(data)
-        elif hasattr(data, 'read'):
-            pos = 0
-            try:
-                pos = data.tell()
-            except:  # pylint: disable=bare-except
-                pass
-            for chunk in iter(lambda: data.read(4096), b""):
-                md5.update(chunk)
-            try:
-                data.seek(pos, SEEK_SET)
-            except (AttributeError, IOError):
-                raise ValueError("Data should be bytes or a seekable file-like object.")
-        else:
-            raise ValueError("Data should be bytes or a seekable file-like object.")
-
-        return md5.digest()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        validate_content = request.context.options.pop('validate_content', False)
-        if validate_content and request.http_request.method != 'GET':
-            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
-            request.http_request.headers[self.header_name] = computed_md5
-            request.context['validate_content_md5'] = computed_md5
-        request.context['validate_content'] = validate_content
-
-    def on_response(self, request, response):
-        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
-            computed_md5 = request.context.get('validate_content_md5') or \
-                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
-            if response.http_response.headers['content-md5'] != computed_md5:
-                raise AzureError(
-                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
-                        response.http_response.headers['content-md5'], computed_md5),
-                    response=response.http_response
-                )
-
-
-class StorageRetryPolicy(HTTPPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    def __init__(self, **kwargs):
-        self.total_retries = kwargs.pop('retry_total', 10)
-        self.connect_retries = kwargs.pop('retry_connect', 3)
-        self.read_retries = kwargs.pop('retry_read', 3)
-        self.status_retries = kwargs.pop('retry_status', 3)
-        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
-        super(StorageRetryPolicy, self).__init__()
-
-    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
-        """
-        A function which sets the next host location on the request, if applicable.
-
-        :param dict settings:
-            The retry settings containing the current location mode.
-        :param request:
-            The request to evaluate and possibly modify.
-        """
-        if settings['hosts'] and all(settings['hosts'].values()):
-            url = urlparse(request.url)
-            # If there's more than one possible location, retry to the alternative
-            if settings['mode'] == LocationMode.PRIMARY:
-                settings['mode'] = LocationMode.SECONDARY
-            else:
-                settings['mode'] = LocationMode.PRIMARY
-            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
-            request.url = updated.geturl()
-
-    def configure_retries(self, request):  # pylint: disable=no-self-use
-        body_position = None
-        if hasattr(request.http_request.body, 'read'):
-            try:
-                body_position = request.http_request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-        options = request.context.options
-        return {
-            'total': options.pop("retry_total", self.total_retries),
-            'connect': options.pop("retry_connect", self.connect_retries),
-            'read': options.pop("retry_read", self.read_retries),
-            'status': options.pop("retry_status", self.status_retries),
-            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
-            'mode': options.pop("location_mode", LocationMode.PRIMARY),
-            'hosts': options.pop("hosts", None),
-            'hook': options.pop("retry_hook", None),
-            'body_position': body_position,
-            'count': 0,
-            'history': []
-        }
-
-    def get_backoff_time(self, settings):  # pylint: disable=unused-argument,no-self-use
-        """ Formula for computing the current backoff.
-        Should be overridden by the child class.
-
-        :rtype: float
-        """
-        return 0
-
-    def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        transport.sleep(backoff)
-
-    def increment(self, settings, request, response=None, error=None):
-        """Increment the retry counters.
-
-        :param response: A pipeline response object.
-        :param error: An error encountered during the request, or
-            None if the response was received successfully.
-
-        :return: Whether another retry attempt should be made (False once retries are exhausted).
-        """
-        settings['total'] -= 1
-
-        if error and isinstance(error, ServiceRequestError):
-            # Errors when we're fairly sure that the server did not receive the
-            # request, so it should be safe to retry.
-            settings['connect'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        elif error and isinstance(error, ServiceResponseError):
-            # Errors that occur after the request has been started, so we should
-            # assume that the server began processing it.
-            settings['read'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # status_forcelist and the given method is in the whitelist
-            if response:
-                settings['status'] -= 1
-                settings['history'].append(RequestHistory(request, http_response=response))
-
-        if not is_exhausted(settings):
-            if request.method not in ['PUT'] and settings['retry_secondary']:
-                self._set_next_host_location(settings, request)
-
-            # rewind the request body if it is a stream
-            if request.body and hasattr(request.body, 'read'):
-                # if no position was saved, then retry will not work
-                if settings['body_position'] is None:
-                    return False
-                try:
-                    # attempt to rewind the body to the initial position
-                    request.body.seek(settings['body_position'], SEEK_SET)
-                except (UnsupportedOperation, ValueError):
-                    # if body is not seekable, then retry would not work
-                    return False
-            settings['count'] += 1
-            return True
-        return False
-
-    def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(StorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(StorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
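
To make the retry schedule in the deleted policies concrete: with the defaults (initial_backoff=15, increment_base=3) and jitter ignored, ExponentialRetry waits 15s, 18s, then 24s before successive attempts. A quick check of that arithmetic with a hypothetical helper mirroring get_backoff_time:

    def expected_backoff(count, initial_backoff=15, increment_base=3):
        # 'count' is the number of retries already performed, i.e.
        # settings['count'] in StorageRetryPolicy above; jitter is ignored.
        return initial_backoff + (0 if count == 0 else increment_base ** count)

    assert [expected_backoff(c) for c in range(3)] == [15, 18, 24]
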
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,219 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import random
-import logging
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline.policies import AsyncHTTPPolicy
-from azure.core.exceptions import AzureError
-
-from .policies import is_retry, StorageRetryPolicy
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-async def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        if asyncio.iscoroutine(settings['hook']):
-            await settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-        else:
-            settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-
-
-class AsyncStorageResponseHook(AsyncHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(AsyncStorageResponseHook, self).__init__()
-
-    async def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = await self.next.send(request)
-        await response.http_response.load_body()
-
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            if asyncio.iscoroutine(response_callback):
-                await response_callback(response)
-            else:
-                response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-class AsyncStorageRetryPolicy(StorageRetryPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    async def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        await transport.sleep(backoff)
-
-    async def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = await self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        await retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        await self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    await retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    await self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(AsyncStorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates the range over which to jitter/randomize the back-off interval.
-            For example, a random_jitter_range of 3 causes a back-off interval of x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(AsyncStorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates the range over which to jitter/randomize the back-off interval.
-            For example, a random_jitter_range of 3 causes a back-off interval of x to vary between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
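As a quick check of the two formulas above, this standalone sketch reproduces the backoff computations (a simplified copy, not the SDK classes themselves):

import random

def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
    # initial_backoff + increment_base^count, jittered by +/- jitter seconds
    backoff = initial_backoff + (0 if count == 0 else pow(increment_base, count))
    start = backoff - jitter if backoff > jitter else 0
    return random.uniform(start, backoff + jitter)

def linear_backoff(backoff=15, jitter=3):
    # constant interval, jittered by +/- jitter seconds
    start = backoff - jitter if backoff > jitter else 0
    return random.uniform(start, backoff + jitter)

# The first three exponential retries cluster around 15, 18 and 24 seconds:
for count in range(3):
    print(round(exponential_backoff(count), 1))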
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,147 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-
-import logging
-from os import fstat
-from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
-
-import isodate
-
-from azure.core.exceptions import raise_with_traceback
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def serialize_iso(attr):
-    """Serialize Datetime object into ISO-8601 formatted string.
-
-    :param datetime attr: Object to be serialized.
-    :rtype: str
-    :raises: ValueError if the format is invalid.
-    """
-    if not attr:
-        return None
-    if isinstance(attr, str):
-        attr = isodate.parse_datetime(attr)
-    try:
-        utc = attr.utctimetuple()
-        if utc.tm_year > 9999 or utc.tm_year < 1:
-            raise OverflowError("Hit max or min date")
-
-        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
-            utc.tm_year, utc.tm_mon, utc.tm_mday,
-            utc.tm_hour, utc.tm_min, utc.tm_sec)
-        return date + 'Z'
-    except (ValueError, OverflowError) as err:
-        msg = "Unable to serialize datetime object."
-        raise_with_traceback(ValueError, msg, err)
-    except AttributeError as err:
-        msg = "ISO-8601 object must be valid Datetime object."
-        raise_with_traceback(TypeError, msg, err)
-
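A minimal sketch of the formatting path above using only the standard library (the isodate parsing and error translation are omitted):

from datetime import datetime, timezone

def to_iso8601(dt):
    # mirror serialize_iso: truncate to whole seconds in UTC, suffix 'Z'
    utc = dt.utctimetuple()
    return '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z'.format(
        utc.tm_year, utc.tm_mon, utc.tm_mday,
        utc.tm_hour, utc.tm_min, utc.tm_sec)

print(to_iso8601(datetime(2025, 5, 5, 12, 30, tzinfo=timezone.utc)))
# -> 2025-05-05T12:30:00Z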
-
-def get_length(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:  # pylint: disable=bare-except
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            try:
-                return fstat(fileno).st_size
-            except OSError:
-                # Not a valid fileno; `requests` may have
-                # returned a socket number instead.
-                pass
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
-
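get_length tries three strategies in order: __len__, the file descriptor, then seek/tell. A standalone illustration of each path:

import io
import os
import tempfile

data = b'x' * 1024
assert len(data) == 1024                       # __len__ path (bytes, bytearray)

with tempfile.TemporaryFile() as f:            # fileno()/fstat path
    f.write(data)
    f.flush()
    assert os.fstat(f.fileno()).st_size == 1024

stream = io.BytesIO(data)                      # seek/tell fallback
pos = stream.tell()
stream.seek(0, io.SEEK_END)
assert stream.tell() - pos == 1024
stream.seek(pos, io.SEEK_SET)                  # restore the original position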
-
-def read_length(data):
-    try:
-        if hasattr(data, 'read'):
-            read_data = b''
-            for chunk in iter(lambda: data.read(4096), b""):
-                read_data += chunk
-            return len(read_data), read_data
-        if hasattr(data, '__iter__'):
-            read_data = b''
-            for chunk in data:
-                read_data += chunk
-            return len(read_data), read_data
-    except:  # pylint: disable=bare-except
-        pass
-    raise ValueError("Unable to calculate content length, please specify.")
-
-
-def validate_and_format_range_headers(
-        start_range, end_range, start_range_required=True,
-        end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if (start_range_required or end_range is not None) and start_range is None:
-        raise ValueError("start_range value cannot be None.")
-    if end_range_required and end_range is None:
-        raise ValueError("end_range value cannot be None.")
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError("Invalid page blob start_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(start_range))
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError("Invalid page blob end_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(end_range))
-
-    # Format based on whether end_range is present
-    range_header = None
-    if end_range is not None:
-        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        range_header = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    range_validation = None
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError("Both start and end range requied for MD5 content validation.")
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
-        range_validation = 'true'
-
-    return range_header, range_validation
-
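Hypothetical direct calls, assuming the helper above is importable, showing the header shapes it produces:

header, md5_flag = validate_and_format_range_headers(0, 511, check_content_md5=True)
assert header == 'bytes=0-511' and md5_flag == 'true'

header, _ = validate_and_format_range_headers(512, None, end_range_required=False)
assert header == 'bytes=512-'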
-
-def add_metadata_headers(metadata=None):
-    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
-    headers = {}
-    if metadata:
-        for key, value in metadata.items():
-            headers['x-ms-meta-{}'.format(key)] = value
-    return headers
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.exceptions import (
-    HttpResponseError,
-    ResourceNotFoundError,
-    ResourceModifiedError,
-    ResourceExistsError,
-    ClientAuthenticationError,
-    DecodeError)
-
-from .parser import _to_utc_datetime
-from .models import StorageErrorCode, UserDelegationKey, get_enum_value
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.exceptions import AzureError
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-class PartialBatchErrorException(HttpResponseError):
-    """There is a partial failure in batch operations.
-
-    :param str message: The message of the exception.
-    :param response: Server response to be deserialized.
-    :param list parts: A list of the parts in multipart response.
-    """
-
-    def __init__(self, message, response, parts):
-        self.parts = parts
-        super(PartialBatchErrorException, self).__init__(message=message, response=response)
-
-
-def parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
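The split-twice parsing can be verified directly:

content_range = 'bytes 1-3/65537'
length = int(content_range.split(' ', 1)[1].split('/', 1)[1])
assert length == 65537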
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers)
-
-
-def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers), deserialized
-
-
-def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return response.location_mode, deserialized
-
-
-def process_storage_error(storage_error):
-    raise_error = HttpResponseError
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        if error_body:
-            for info in error_body.iter():
-                if info.tag.lower() == 'code':
-                    error_code = info.text
-                elif info.tag.lower() == 'message':
-                    error_message = info.text
-                else:
-                    additional_data[info.tag] = info.text
-    except DecodeError:
-        pass
-
-    try:
-        if error_code:
-            error_code = StorageErrorCode(error_code)
-            if error_code in [StorageErrorCode.condition_not_met,
-                              StorageErrorCode.blob_overwritten]:
-                raise_error = ResourceModifiedError
-            if error_code in [StorageErrorCode.invalid_authentication_info,
-                              StorageErrorCode.authentication_failed]:
-                raise_error = ClientAuthenticationError
-            if error_code in [StorageErrorCode.resource_not_found,
-                              StorageErrorCode.cannot_verify_copy_source,
-                              StorageErrorCode.blob_not_found,
-                              StorageErrorCode.queue_not_found,
-                              StorageErrorCode.container_not_found,
-                              StorageErrorCode.parent_not_found,
-                              StorageErrorCode.share_not_found]:
-                raise_error = ResourceNotFoundError
-            if error_code in [StorageErrorCode.account_already_exists,
-                              StorageErrorCode.account_being_created,
-                              StorageErrorCode.resource_already_exists,
-                              StorageErrorCode.resource_type_mismatch,
-                              StorageErrorCode.blob_already_exists,
-                              StorageErrorCode.queue_already_exists,
-                              StorageErrorCode.container_already_exists,
-                              StorageErrorCode.container_being_deleted,
-                              StorageErrorCode.queue_being_deleted,
-                              StorageErrorCode.share_already_exists,
-                              StorageErrorCode.share_being_deleted]:
-                raise_error = ResourceExistsError
-    except ValueError:
-        # Got an unknown error code
-        pass
-
-    try:
-        error_message += "\nErrorCode:{}".format(error_code.value)
-    except AttributeError:
-        error_message += "\nErrorCode:{}".format(error_code)
-    for name, info in additional_data.items():
-        error_message += "\n{}:{}".format(name, info)
-
-    error = raise_error(message=error_message, response=storage_error.response)
-    error.error_code = error_code
-    error.additional_info = additional_data
-    raise error
-
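A condensed sketch of the classification above, with plain exception classes standing in for the azure.core types (the code strings are real x-ms-error-code values; the mapping shown is a small subset):

class ResourceNotFoundError(Exception): pass
class ResourceExistsError(Exception): pass
class ClientAuthenticationError(Exception): pass

_ERROR_MAP = {
    'BlobNotFound': ResourceNotFoundError,
    'ContainerNotFound': ResourceNotFoundError,
    'BlobAlreadyExists': ResourceExistsError,
    'AuthenticationFailed': ClientAuthenticationError,
}

def classify(error_code):
    # Unknown codes fall back to a generic error, as in process_storage_error.
    return _ERROR_MAP.get(error_code, Exception)

assert classify('BlobNotFound') is ResourceNotFoundError
assert classify('SomethingNew') is Exception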
-
-def parse_to_internal_user_delegation_key(service_user_delegation_key):
-    internal_user_delegation_key = UserDelegationKey()
-    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
-    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
-    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
-    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
-    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
-    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
-    internal_user_delegation_key.value = service_user_delegation_key.value
-    return internal_user_delegation_key
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,209 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from datetime import date
-
-from .parser import _str, _to_utc_datetime
-from .constants import X_MS_VERSION
-from . import sign_string, url_quote
-
-
-class QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-
-    @staticmethod
-    def to_list():
-        return [
-            QueryStringConstants.SIGNED_SIGNATURE,
-            QueryStringConstants.SIGNED_PERMISSION,
-            QueryStringConstants.SIGNED_START,
-            QueryStringConstants.SIGNED_EXPIRY,
-            QueryStringConstants.SIGNED_RESOURCE,
-            QueryStringConstants.SIGNED_IDENTIFIER,
-            QueryStringConstants.SIGNED_IP,
-            QueryStringConstants.SIGNED_PROTOCOL,
-            QueryStringConstants.SIGNED_VERSION,
-            QueryStringConstants.SIGNED_CACHE_CONTROL,
-            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
-            QueryStringConstants.SIGNED_CONTENT_ENCODING,
-            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
-            QueryStringConstants.SIGNED_CONTENT_TYPE,
-            QueryStringConstants.START_PK,
-            QueryStringConstants.START_RK,
-            QueryStringConstants.END_PK,
-            QueryStringConstants.END_RK,
-            QueryStringConstants.SIGNED_RESOURCE_TYPES,
-            QueryStringConstants.SIGNED_SERVICES,
-            QueryStringConstants.SIGNED_OID,
-            QueryStringConstants.SIGNED_TID,
-            QueryStringConstants.SIGNED_KEY_START,
-            QueryStringConstants.SIGNED_KEY_EXPIRY,
-            QueryStringConstants.SIGNED_KEY_SERVICE,
-            QueryStringConstants.SIGNED_KEY_VERSION,
-        ]
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account access
-    signature tokens with an account name and account key. Users can either
-    use the factory or construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service
-        or to create a new account object.
-
-        :param services:
-            Specifies the services accessible with the account SAS.
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account
-            SAS. You can combine values to provide access to more than one
-            resource type.
-        :param AccountSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy. You can combine
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the account SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(QueryStringConstants.SIGNED_START, start)
-        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, policy_id):
-        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,548 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from concurrent import futures
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from threading import Lock
-from itertools import islice
-from math import ceil
-
-import six
-
-from azure.core.tracing.common import with_current_context
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
-
-
-def _parallel_uploads(executor, uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(executor.submit(with_current_context(uploader), next_chunk))
-
-    # Wait for the remaining uploads to finish
-    done, _running = futures.wait(running)
-    range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
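The helper above keeps at most max_concurrency uploads in flight, replacing each completed future with the next pending chunk. A standalone sketch of the same sliding-window pattern:

from concurrent import futures
from itertools import islice

def work(n):
    return n * n

tasks = iter(range(10))
max_concurrency = 3
results = []
with futures.ThreadPoolExecutor(max_concurrency) as executor:
    running = {executor.submit(work, t) for t in islice(tasks, max_concurrency)}
    while running:
        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
        results.extend(f.result() for f in done)
        # top the window back up with one new task per completed one
        for t in islice(tasks, len(done)):
            running.add(executor.submit(work, t))
print(sorted(results))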
-
-def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        validate_content=validate_content,
-        **kwargs)
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_chunk), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_substream_block), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b""
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError("Blob data should be of type bytes.")
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b"" or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_lock is not None:
-            with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{:05}'.format(i), SubStream(self.stream, index, length, lock))
-
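The block slicing above is ceiling division with a short final block; for example (values illustrative):

from math import ceil

blob_length, chunk_size = 10_000_000, 4 * 1024 * 1024
blocks = int(ceil(blob_length / (chunk_size * 1.0)))
last_block_size = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
assert blocks == 3
assert last_block_size == blob_length - 2 * chunk_size   # 1,611,392 bytes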
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop("modified_access_conditions", None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return index, block_id
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        return not any(bytearray(chunk_data))
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-            self.current_length = int(self.response_headers["blob_append_offset"])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        response = self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
-
-
-class SubStream(IOBase):
-
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derivations of io.IOBase and thus do not implement seekable().
-        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # only the main thread runs this, so there's no need to grab the lock
-            wrapped_stream.seek(0, SEEK_CUR)
-        except:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = (
-            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        )
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-        super(SubStream, self).__init__()
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, size=None):
-        if self.closed:  # pylint: disable=using-constant-test
-            raise ValueError("Stream is closed.")
-
-        if size is None:
-            size = self._length - self._position
-
-        # adjust if out of bounds
-        if size + self._position >= self._length:
-            size = self._length - self._position
-
-        # return fast
-        if size == 0 or self._buffer.closed:
-            return b""
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(size)
-        bytes_read = len(read_buffer)
-        bytes_remaining = size - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_concurrency > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence is SEEK_SET:
-            start_index = 0
-        elif whence is SEEK_CUR:
-            start_index = self._position
-        elif whence is SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self):
-        raise UnsupportedOperation
-
-    def writelines(self):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
-
-
-class IterStreamer(object):
-    """
-    File-like streaming iterator.
-    """
-
-    def __init__(self, generator, encoding="UTF-8"):
-        self.generator = generator
-        self.iterator = iter(generator)
-        self.leftover = b""
-        self.encoding = encoding
-
-    def __len__(self):
-        return self.generator.__len__()
-
-    def __iter__(self):
-        return self.iterator
-
-    def seekable(self):
-        return False
-
-    def next(self):
-        return next(self.iterator)
-
-    def tell(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator does not support tell.")
-
-    def seek(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator is unseekable.")
-
-    def read(self, size):
-        data = self.leftover
-        count = len(self.leftover)
-        try:
-            while count < size:
-                chunk = self.next()
-                if isinstance(chunk, six.text_type):
-                    chunk = chunk.encode(self.encoding)
-                data += chunk
-                count += len(chunk)
-        except StopIteration:
-            pass
-
-        if count > size:
-            self.leftover = data[size:]
-
-        return data[:size]
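Hypothetical usage of IterStreamer, assuming the class above is importable: read() buffers whole chunks from the generator, returns exactly the requested number of bytes, and carries any excess over as leftover for the next call:

stream = IterStreamer(c for c in (b'abc', b'def', b'gh'))
assert stream.read(4) == b'abcd'   # pulls two chunks, keeps b'ef' as leftover
assert stream.read(4) == b'efgh'   # leftover plus the final chunk
assert stream.read(4) == b''       # generator exhausted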
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,350 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-import asyncio
-from asyncio import Lock
-from itertools import islice
-import threading
-
-from math import ceil
-
-import six
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-
-
-async def _parallel_uploads(uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(asyncio.ensure_future(uploader(next_chunk)))
-
-    # Wait for the remaining uploads to finish
-    if running:
-        done, _running = await asyncio.wait(running)
-        range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
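The asyncio variant of the sliding-window pattern used above, as a standalone sketch:

import asyncio
from itertools import islice

async def work(n):
    await asyncio.sleep(0)
    return n * n

async def main():
    tasks = iter(range(10))
    max_concurrency = 3
    running = {asyncio.ensure_future(work(t)) for t in islice(tasks, max_concurrency)}
    results = []
    while running:
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        results.extend(f.result() for f in done)
        for t in islice(tasks, len(done)):
            running.add(asyncio.ensure_future(work(t)))
    print(sorted(results))

asyncio.run(main())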
-
-async def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_chunk(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for chunk in uploader.get_chunk_streams():
-            range_ids.append(await uploader.process_chunk(chunk))
-
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-async def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_substream_block(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for block in uploader.get_substream_blocks():
-            range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    async def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    async def _update_progress(self, length):
-        if self.progress_lock is not None:
-            async with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = await self._upload_chunk(chunk_offset, chunk_data)
-        await self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
-
-    async def process_substream_block(self, block_data):
-        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
-        await self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop('modified_access_conditions', None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        await self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options)
-        return index, block_id
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        try:
-            await self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until a non-zero byte is encountered;
-        # if we reach the end without returning, chunk_data is all zeros
-        for each_byte in chunk_data:
-            if each_byte not in [0, b'\x00']:
-                return False
-        return True
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = await self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-            self.current_length = int(self.response_headers['blob_append_offset'])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        chunk_end = chunk_offset + len(chunk_data) - 1
-        response = await self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            chunk_end,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-        return range_id, response
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,380 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from azure.storage.blob import generate_account_sas as generate_blob_account_sas
-from azure.storage.blob import generate_container_sas, generate_blob_sas
-
-
-def generate_account_sas(
-        account_name,  # type: str
-        account_key,  # type: str
-        resource_types,  # type: Union[ResourceTypes, str]
-        permission,  # type: Union[AccountSasPermissions, str]
-        expiry,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):  # type: (...) -> str
-    """Generates a shared access signature for the blob service.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str account_key:
-        The access key to generate the shared access signature.
-    :param resource_types:
-        Specifies the resource types that are accessible with the account SAS.
-    :type resource_types: str or ~azure.storage.blob.ResourceTypes
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.AccountSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../tests/test_blob_samples_authentication.py
-            :start-after: [START create_sas_token]
-            :end-before: [END create_sas_token]
-            :language: python
-            :dedent: 8
-            :caption: Generating a shared access signature.
-    """
-    return generate_blob_account_sas(
-        account_name=account_name,
-        account_key=account_key,
-        resource_types=resource_types,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs
-    )
-
-
-def generate_file_system_sas(
-        account_name,  # type: str
-        file_system_name,  # type: str
-        account_key=None,  # type: Optional[str]
-        user_delegation_key=None,  # type: Optional[UserDelegationKey]
-        permission=None,  # type: Optional[Union[FileSystemSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a container.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str file_system_name:
-        The name of the file system.
-    :param str account_key:
-        The access key to generate the shared access signature. Either `account_key` or
-        `user_delegation_key` must be specified.
-    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
-        Instead of an account key, the user could pass in a user delegation key.
-        A user delegation key can be obtained from the service by authenticating with an AAD identity;
-        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
-        When present, the SAS is signed with the user delegation key instead.
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.ContainerSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../tests/test_blob_samples_containers.py
-            :start-after: [START generate_sas_token]
-            :end-before: [END generate_sas_token]
-            :language: python
-            :dedent: 12
-            :caption: Generating a sas token.
-    """
-    return generate_container_sas(
-        account_name=account_name,
-        container_name=file_system_name,
-        account_key=account_key,
-        user_delegation_key=user_delegation_key,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs)
-
-
-def generate_directory_sas(
-        account_name,  # type: str
-        file_system_name,  # type: str
-        directory_name,  # type: str
-        account_key=None,  # type: Optional[str]
-        user_delegation_key=None,  # type: Optional[UserDelegationKey]
-        permission=None,  # type: Optional[Union[BlobSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a blob.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str file_system_name:
-        The name of the file system.
-    :param str directory_name:
-        The full path of the directory for which the signature is
-        generated, e.g. {directory under file system}/{subdirectory}.
-    :param str account_key:
-        The access key to generate the shared access signature. Either `account_key` or
-        `user_delegation_key` must be specified.
-    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
-        Instead of an account key, the user could pass in a user delegation key.
-        A user delegation key can be obtained from the service by authenticating with an AAD identity;
-        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
-        When present, the SAS is signed with the user delegation key instead.
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.BlobSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-    """
-    return generate_blob_sas(
-        account_name=account_name,
-        container_name=file_system_name,
-        blob_name=directory_name,
-        account_key=account_key,
-        user_delegation_key=user_delegation_key,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs)
-
-
-def generate_file_sas(
-        account_name,  # type: str
-        file_system_name,  # type: str
-        directory_name,  # type: str
-        file_name,  # type: str
-        account_key=None,  # type: Optional[str]
-        user_delegation_key=None,  # type: Optional[UserDelegationKey]
-        permission=None,  # type: Optional[Union[BlobSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a blob.
-
-    Use the returned signature with the credential parameter of any BlobServiceClient,
-    ContainerClient or BlobClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str file_system_name:
-        The name of the file system.
-    :param str directory_name:
-        The path of the directory containing the file.
-    :param str file_name:
-        The name of the file.
-    :param str account_key:
-        The access key to generate the shared access signature. Either `account_key` or
-        `user_delegation_key` must be specified.
-    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
-        Instead of an account key, the user could pass in a user delegation key.
-        A user delegation key can be obtained from the service by authenticating with an AAD identity;
-        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
-        When present, the SAS is signed with the user delegation key instead.
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.blob.BlobSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-    """
-    if directory_name:
-        path = directory_name.rstrip('/') + "/" + file_name
-    else:
-        path = file_name
-    return generate_blob_sas(
-        account_name=account_name,
-        container_name=file_system_name,
-        blob_name=path,
-        account_key=account_key,
-        user_delegation_key=user_delegation_key,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs)
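
The wrappers deleted above only re-target the blob SAS helpers at DataLake names;
generate_file_sas, for instance, joins directory_name and file_name into a blob path
before delegating to generate_blob_sas. A hypothetical usage sketch of the removed
generate_file_system_sas, with placeholder account values:

    from datetime import datetime, timedelta

    sas_token = generate_file_system_sas(
        account_name='myaccount',           # placeholder
        file_system_name='myfilesystem',    # placeholder
        account_key='<account-key>',        # or pass user_delegation_key instead
        permission='rwdl',                  # ordered: read, write, delete, list
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # The token is then passed as the credential of a client or appended
    # to the resource URL as a query string.
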
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-VERSION = "12.0.0b7"
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from .._shared.policies_async import ExponentialRetry, LinearRetry
-from ._data_lake_file_client_async import DataLakeFileClient
-from ._data_lake_directory_client_async import DataLakeDirectoryClient
-from ._file_system_client_async import FileSystemClient
-from ._data_lake_service_client_async import DataLakeServiceClient
-from ._data_lake_lease_async import DataLakeLeaseClient
-
-__all__ = [
-    'DataLakeServiceClient',
-    'FileSystemClient',
-    'DataLakeDirectoryClient',
-    'DataLakeFileClient',
-    'DataLakeLeaseClient',
-    'ExponentialRetry',
-    'LinearRetry',
-]
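
With this __init__.py removed, the v2019_07_07 async import path no longer resolves
in 1.5.0. Code such as the following, using names taken from the deleted __all__
above, must move to one of the API versions retained in this release:

    from azure.multiapi.storagev2.filedatalake.v2019_07_07.aio import (
        DataLakeServiceClient,
        FileSystemClient,
    )
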
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,506 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ._data_lake_file_client_async import DataLakeFileClient
-from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase
-from .._models import DirectoryProperties
-from ._path_client_async import PathClient
-
-
-class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase):
-    """A client to interact with the DataLake directory, even if the directory may not yet exist.
-
-    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
-    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param directory_name:
-        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}
-    :type directory_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            file_system_name,  # type: str
-            directory_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call
-                                                      credential=credential, **kwargs)
-
-    async def create_directory(self, content_settings=None,  # type: Optional[ContentSettings]
-                               metadata=None,  # type: Optional[Dict[str, str]]
-                               **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create a new directory.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask:
-            Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict (Etag and last modified).
-        """
-        return await self._create('directory', content_settings=content_settings, metadata=metadata, **kwargs)
-
-    async def delete_directory(self, **kwargs):
-        # type: (...) -> None
-        """
-        Marks the specified directory for deletion.
-
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.LeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        return await self._delete(**kwargs)
-
-    async def get_directory_properties(self, **kwargs):
-        # type: (**Any) -> DirectoryProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the directory. It does not return the content of the directory.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: DirectoryProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        blob_properties = await self._get_path_properties(**kwargs)
-        return DirectoryProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
-
-    async def rename_directory(self, rename_destination, **kwargs):
-        # type: (str, **Any) -> DataLakeDirectoryClient
-        """
-        Rename the source directory.
-
-        :param str rename_destination:
-            the new directory name the user wants to rename to.
-            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
-        :keyword source_lease:
-            A lease ID for the source path. If specified,
-            the source path must have an active lease and the lease ID must
-            match.
-        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask:
-            Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :paramtype permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        rename_destination = rename_destination.strip('/')
-        new_file_system = rename_destination.split('/')[0]
-        path = rename_destination[len(new_file_system):]
-
-        new_directory_client = DataLakeDirectoryClient(
-            self.url, new_file_system, directory_name=path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-        await new_directory_client._rename_path('/' + self.file_system_name + '/' + self.path_name,  # pylint: disable=protected-access
-                                                **kwargs)
-        return new_directory_client
-
-    async def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
-                                   content_settings=None,  # type: Optional[ContentSettings]
-                                   metadata=None,  # type: Optional[Dict[str, str]]
-                                   **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Create a subdirectory and return the subdirectory client to be interacted with.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the blob as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask:
-            Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient for the subdirectory.
-        """
-        subdir = self.get_sub_directory_client(sub_directory)
-        await subdir.create_directory(content_settings=content_settings, metadata=metadata, **kwargs)
-        return subdir
-
-    async def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
-                                   **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Marks the specified subdirectory for deletion.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.LeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient for the subdirectory
-        """
-        subdir = self.get_sub_directory_client(sub_directory)
-        await subdir.delete_directory(**kwargs)
-        return subdir
-
-    async def create_file(self, file,  # type: Union[FileProperties, str]
-                          **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Create a new file and return the file client to be interacted with.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword metadata:
-            Name-value pairs associated with the blob as metadata.
-        :paramtype metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask:
-            Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        await file_client.create_file(**kwargs)
-        return file_client
-
-    def get_file_client(self, file  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties, e.g. directory/subdirectory/file
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_client]
-                :end-before: [END bsc_get_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file.name
-        except AttributeError:
-            file_path = self.path_name + '/' + file
-
-        return DataLakeFileClient(
-            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-
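-    # Usage sketch (hypothetical variables): both a plain relative name and a
-    # FileProperties instance are accepted, per the docstring above.
-    #
-    #     file_client = directory_client.get_file_client("report.csv")
-    #     same_client = directory_client.get_file_client(file_props)  # FileProperties
-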
-    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
-                                 ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified subdirectory of the current directory.
-
-        The subdirectory need not already exist.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_directory_client]
-                :end-before: [END bsc_get_directory_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        try:
-            subdir_path = sub_directory.name
-        except AttributeError:
-            subdir_path = self.path_name + '/' + sub_directory
-
-        return DataLakeDirectoryClient(
-            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,421 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._path_client_async import PathClient
-from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
-from .._deserialize import process_storage_error
-from .._generated.models import StorageErrorException
-from .._models import FileProperties
-
-
-class DataLakeFileClient(PathClient, DataLakeFileClientBase):
-    """A client to interact with the DataLake file, even if the file may not yet exist.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param file_path:
-        The whole file path, used to interact with a specific file,
-        e.g. "{directory}/{subdirectory}/{file}".
-    :type file_path: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            file_system_name,  # type: str
-            file_path,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
-                                                 credential=credential, **kwargs)
-
-    async def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
-                          metadata=None,  # type: Optional[Dict[str, str]]
-                          **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create a new file.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict (ETag and last modified).
-        """
-        return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
-
-    async def delete_file(self, **kwargs):
-        # type: (...) -> None
-        """
-        Marks the specified file for deletion.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        return await self._delete(**kwargs)
-
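-    # Hypothetical sketch of the conditional-access keywords documented above:
-    # delete only if the file is unchanged since its properties were read.
-    #
-    #     from azure.core import MatchConditions
-    #     props = await file_client.get_file_properties()
-    #     await file_client.delete_file(etag=props.etag,
-    #                                   match_condition=MatchConditions.IfNotModified)
-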
-    async def get_file_properties(self, **kwargs):
-        # type: (**Any) -> FileProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file. It does not return the content of the file.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        blob_properties = await self._get_path_properties(**kwargs)
-        return FileProperties._from_blob_properties(blob_properties)  # pylint: disable=protected-access
-
-    async def append_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
-                          offset,  # type: int
-                          length=None,  # type: Optional[int]
-                          **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime, int]]
-        """Append data to the file.
-
-        :param data: Content to be appended to the file.
-        :param offset: the offset at which the data is to be appended.
-        :param length: Size of the data in bytes.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the block content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            blob.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :return: dict of the response headers
-        """
-        options = self._append_data_options(
-            data,
-            offset,
-            length=length,
-            **kwargs)
-        try:
-            return await self._client.path.append_data(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def flush_data(self, offset,  # type: int
-                         retain_uncommitted_data=False,  # type: Optional[bool]
-                         **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """ Commit the previous appended data.
-
-        :param offset: offset is equal to the length of the file after committing
-            the previously appended data.
-        :param bool retain_uncommitted_data: Valid only for flush operations.  If
-            "true", uncommitted data is retained after the flush operation
-            completes; otherwise, the uncommitted data is deleted after the flush
-            operation.  The default is false.  Data at offsets less than the
-            specified position are written to the file when flush succeeds, but
-            this optional parameter allows data after the flush position to be
-            retained for a future flush operation.
-        :keyword bool close: Azure Storage Events allow applications to receive
-            notifications when files change. When Azure Storage Events are
-            enabled, a file changed event is raised. This event has a property
-            indicating whether this is the final change to distinguish the
-            difference between an intermediate flush to a file stream and the
-            final close of a file stream. The close query parameter is valid only
-            when the action is "flush" and change notifications are enabled. If
-            the value of close is "true" and the flush operation completes
-            successfully, the service raises a file change notification with a
-            property indicating that this is the final update (the file stream has
-            been closed). If "false" a change notification is raised indicating
-            the file has changed. The default is false. This query parameter is
-            set to true by the Hadoop ABFS driver to indicate that the file stream
-            has been closed.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :return: response headers as a dict
-        """
-        options = self._flush_data_options(
-            offset,
-            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
-        try:
-            return await self._client.path.flush_data(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
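-    # Minimal sketch of the append/flush protocol described above: data is
-    # staged with append_data at explicit offsets and only becomes part of the
-    # file after flush_data commits up to the final length.
-    #
-    #     data = b"hello, datalake"
-    #     await file_client.append_data(data, offset=0, length=len(data))
-    #     await file_client.flush_data(len(data))
-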
-    async def read_file(self, offset=None,  # type: Optional[int]
-                        length=None,  # type: Optional[int]
-                        stream=None,  # type: Optional[IO]
-                        **kwargs):
-        # type: (...) -> Union[bytes, int]
-        """Download a file from the service. Return the downloaded data in bytes or
-        write the downloaded data into user provided stream and return the written size.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the file.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :param IO stream:
-            User provided stream to write the downloaded data into.
-        :keyword lease:
-            If specified, the download only succeeds if the file's lease is active
-            and matches this ID. Required if the file has an active lease.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds. This method may make
-            multiple calls to the Azure service and the timeout will apply to
-            each call individually.
-        :returns: downloaded data or the size of data written into the provided stream
-        :rtype: bytes or int
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_hello_world.py
-                :start-after: [START download_a_blob]
-                :end-before: [END download_a_blob]
-                :language: python
-                :dedent: 12
-                :caption: Download a blob.
-        """
-        downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
-        if stream:
-            return await downloader.readinto(stream)
-        return await downloader.readall()
-
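-    # Sketch of the two return modes documented above: bytes when no stream is
-    # given, the number of bytes written when one is provided.
-    #
-    #     content = await file_client.read_file()  # -> bytes
-    #     with open("local.bin", "wb") as stream:  # hypothetical local path
-    #         written = await file_client.read_file(stream=stream)  # -> int
-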
-    async def rename_file(self, rename_destination, **kwargs):
-        # type: (**Any) -> DataLakeFileClient
-        """
-        Rename the source file.
-
-        :param str rename_destination: the new path that the file will be renamed to.
-            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
-        :keyword source_lease: A lease ID for the source path. If specified,
-            the source path must have an active lease and the lease ID must
-            match.
-        :type source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions: Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :type permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        rename_destination = rename_destination.strip('/')
-        new_file_system = rename_destination.split('/')[0]
-        path = rename_destination[len(new_file_system):]
-
-        new_file_client = DataLakeFileClient(
-            self.url, new_file_system, file_path=path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-        await new_file_client._rename_path(  # pylint: disable=protected-access
-            '/' + self.file_system_name + '/' + self.path_name, **kwargs)
-        return new_file_client
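-
-    # Usage sketch (hypothetical names): the destination must be prefixed with
-    # its file system, per the docstring above.
-    #
-    #     renamed = await file_client.rename_file(
-    #         file_client.file_system_name + "/newdir/newname.txt")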
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,242 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any,
-    TypeVar, TYPE_CHECKING
-)
-from azure.storage.blob.aio import BlobLeaseClient
-from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
-
-
-if TYPE_CHECKING:
-    FileSystemClient = TypeVar("FileSystemClient")
-    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
-    DataLakeFileClient = TypeVar("DataLakeFileClient")
-
-
-class DataLakeLeaseClient(DataLakeLeaseClientBase):
-    """Creates a new DataLakeLeaseClient.
-
-    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the file system, directory, or file to lease.
-    :type client: ~azure.storage.filedatalake.FileSystemClient or
-        ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-    def __init__(
-            self, client, lease_id=None
-    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
-        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
-        super(DataLakeLeaseClient, self).__init__(client, lease_id)
-
-        if hasattr(client, '_blob_client'):
-            _client = client._blob_client  # type: ignore # pylint: disable=protected-access
-        elif hasattr(client, '_container_client'):
-            _client = client._container_client  # type: ignore # pylint: disable=protected-access
-        else:
-            raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
-
-        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
-
-    def __enter__(self):
-        raise TypeError("Async lease must use 'async with'.")
-
-    def __exit__(self, *args):
-        self.release()
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(self, *args):
-        await self.release()
-
-    async def acquire(self, lease_duration=-1, **kwargs):
-        # type: (int, **Any) -> None
-        """Requests a new lease.
-
-        If the file system, directory, or file does not have an active lease, the
-        DataLake service creates a lease on it and returns a new lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
-        self._update_lease_client_attributes()
-
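-    # Sketch of lease usage (hypothetical client variable): acquire, use, and
-    # release explicitly; `async with lease: ...` would release on exit via
-    # __aexit__ above.
-    #
-    #     lease = DataLakeLeaseClient(file_client)
-    #     await lease.acquire(lease_duration=15)
-    #     try:
-    #         pass  # operations passing lease=lease
-    #     finally:
-    #         await lease.release()
-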
-    async def renew(self, **kwargs):
-        # type: (Any) -> None
-        """Renews the lease.
-
-        The lease can be renewed if the lease ID specified in the
-        lease client matches that associated with the container or blob. Note that
-        the lease may be renewed even if it has expired as long as the container
-        or blob has not been leased again since the expiration of that lease. When you
-        renew a lease, the lease duration clock resets.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        await self._blob_lease_client.renew(**kwargs)
-        self._update_lease_client_attributes()
-
-    async def release(self, **kwargs):
-        # type: (Any) -> None
-        """Release the lease.
-
-        The lease may be released if the client lease id specified matches
-        that associated with the container or blob. Releasing the lease allows another client
-        to immediately acquire the lease for the container or blob as soon as the release is complete.
-
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        await self._blob_lease_client.release(**kwargs)
-        self._update_lease_client_attributes()
-
-    async def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """Change the lease ID of an active lease.
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The Blob service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
-        self._update_lease_client_attributes()
-
-    async def break_lease(self, lease_break_period=None, **kwargs):
-        # type: (Optional[int], Any) -> int
-        """Break the lease, if the container or blob has an active lease.
-
-        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. When a lease
-        is broken, the lease break period is allowed to elapse, during which time
-        no lease operation except break and release can be performed on the container or blob.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :param int lease_break_period:
-            This is the proposed duration of seconds that the lease
-            should continue before it is broken, between 0 and 60 seconds. This
-            break period is only used if it is shorter than the time remaining
-            on the lease. If longer, the time remaining on the lease is used.
-            A new lease will not be available before the break period has
-            expired, but the lease may be held for longer than the break
-            period. If this header does not appear with a break
-            operation, a fixed-duration lease breaks after the remaining lease
-            period elapses, and an infinite lease breaks immediately.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
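-
-    # Sketch using the return value documented above: the service reports how
-    # long to wait before the broken lease can be re-acquired.
-    #
-    #     import asyncio
-    #     seconds = await lease.break_lease(lease_break_period=10)
-    #     await asyncio.sleep(seconds)  # safe to acquire a new lease afterwards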
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,330 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.core.async_paging import AsyncItemPaged
-
-from azure.storage.blob.aio import BlobServiceClient
-from .._generated.aio import DataLakeStorageClient
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from ._file_system_client_async import FileSystemClient
-from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
-from .._shared.policies_async import ExponentialRetry
-from ._data_lake_directory_client_async import DataLakeDirectoryClient
-from ._data_lake_file_client_async import DataLakeFileClient
-from ._models import FileSystemPropertiesPaged
-from .._models import UserDelegationKey
-
-
-class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
-    """A client to interact with the DataLake Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete file systems within the account.
-    For operations relating to a specific file system, directory or file, clients for those entities
-    can also be retrieved using the `get_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the datalake service endpoint. This could be either the
-        primary endpoint, or the secondary endpoint depending on the current `location_mode`.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URL to the DataLake storage account. Any other entities included
-        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client]
-            :end-before: [END create_datalake_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with account url and credential.
-
-        .. literalinclude:: ../samples/test_datalake_authentication_samples.py
-            :start-after: [START create_datalake_service_client_oauth]
-            :end-before: [END create_datalake_service_client_oauth]
-            :language: python
-            :dedent: 8
-            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        super(DataLakeServiceClient, self).__init__(
-            account_url,
-            credential=credential,
-            **kwargs
-        )
-        self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
-        self._client = DataLakeStorageClient(self.url, None, None, pipeline=self._pipeline)
-        self._loop = kwargs.get('loop', None)
-
-    async def get_user_delegation_key(self, key_start_time,  # type: datetime
-                                      key_expiry_time,  # type: datetime
-                                      **kwargs  # type: Any
-                                      ):
-        # type: (...) -> UserDelegationKey
-        """
-        Obtain a user delegation key for the purpose of signing SAS tokens.
-        A token credential must be present on the service object for this request to succeed.
-
-        :param ~datetime.datetime key_start_time:
-            A DateTime value. Indicates when the key becomes valid.
-        :param ~datetime.datetime key_expiry_time:
-            A DateTime value. Indicates when the key stops being valid.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The user delegation key.
-        :rtype: ~azure.storage.filedatalake.UserDelegationKey
-        """
-        delegation_key = await self._blob_service_client.get_user_delegation_key(
-            key_start_time=key_start_time,
-            key_expiry_time=key_expiry_time,
-            **kwargs)  # pylint: disable=protected-access
-        delegation_key.__class__ = UserDelegationKey  # pylint: disable=protected-access
-        return delegation_key
-
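-    # Sketch of requesting a delegation key valid for one hour (requires a
-    # token credential on the service client):
-    #
-    #     from datetime import datetime, timedelta
-    #     start = datetime.utcnow()
-    #     key = await service_client.get_user_delegation_key(
-    #         start, start + timedelta(hours=1))
-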
-    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
-                          include_metadata=None,  # type: Optional[bool]
-                          **kwargs):
-        # type: (...) -> AsyncItemPaged[FileSystemProperties]
-        """Returns a generator to list the file systems under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all file systems have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only file systems whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that file system metadata be returned in the response.
-            The default value is `False`.
-        :keyword int results_per_page:
-            The maximum number of file system names to retrieve per API
-            call. If the request does not specify it, the server will return up to 5,000 items per page.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An async iterable (auto-paging) of FileSystemProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.FileSystemProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START dsc_list_file_systems]
-                :end-before: [END dsc_list_file_systems]
-                :language: python
-                :dedent: 12
-                :caption: Listing the file systems in the datalake service.
-        """
-        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
-                                                               include_metadata=include_metadata,
-                                                               **kwargs)  # pylint: disable=protected-access
-        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
-        return item_paged
-
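-    # Sketch of consuming the pager returned above; in this aio module it
-    # supports `async for`:
-    #
-    #     async for fs in service_client.list_file_systems(name_starts_with="dev"):
-    #         print(fs.name)
-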
-    async def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
-                                 metadata=None,  # type: Optional[Dict[str, str]]
-                                 public_access=None,  # type: Optional[PublicAccess]
-                                 **kwargs):
-        # type: (...) -> FileSystemClient
-        """Creates a new file system under the specified account.
-
-        If the file system with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a client with which to interact with the newly
-        created file system.
-
-        :param str file_system: The name of the file system to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            file system as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: 'filesystem', 'file'.
-        :type public_access: ~azure.storage.filedatalake.PublicAccess
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START dsc_create_file_system]
-                :end-before: [END dsc_create_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Creating a file system in the datalake service.
-        """
-        file_system_client = self.get_file_system_client(file_system)
-        await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
-        return file_system_client
-
-    async def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
-                                 **kwargs):
-        # type: (...) -> FileSystemClient
-        """Marks the specified file system for deletion.
-
-        The file system and any files contained within it are later deleted during garbage collection.
-        If the file system is not found, a ResourceNotFoundError will be raised.
-
-        :param file_system:
-            The file system to delete. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, delete_file_system only succeeds if the
-            file system's lease is active and matches this ID.
-            Required if the file system has an active lease.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_delete_file_system]
-                :end-before: [END bsc_delete_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Deleting a file system in the datalake service.
-        """
-        file_system_client = self.get_file_system_client(file_system)
-        await file_system_client.delete_file_system(**kwargs)
-        return file_system_client
-
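-    # Combined sketch (hypothetical names): create a file system with metadata,
-    # then mark it for deletion; both calls return a FileSystemClient.
-    #
-    #     fs_client = await service_client.create_file_system(
-    #         "myfilesystem", metadata={"Category": "test"})
-    #     await service_client.delete_file_system("myfilesystem")
-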
-    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
-                               ):
-        # type: (...) -> FileSystemClient
-        """Get a client to interact with the specified file system.
-
-        The file system need not already exist.
-
-        :param file_system:
-            The file system. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :returns: A FileSystemClient.
-        :rtype: ~azure.storage.filedatalake.FileSystemClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_system_client]
-                :end-before: [END bsc_get_file_system_client]
-                :language: python
-                :dedent: 8
-                :caption: Getting the file system client to interact with a specific file system.
-        """
-        return FileSystemClient(self.url, file_system, credential=self._raw_credential, _configuration=self._config,
-                                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
-                                require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
-                                key_resolver_function=self.key_resolver_function)
-
-    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
-                             directory  # type: Union[DirectoryProperties, str]
-                             ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified directory.
-
-        The directory need not already exist.
-
-        :param file_system:
-            The file system that the directory is in. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_directory_client]
-                :end-before: [END bsc_get_directory_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        return DataLakeDirectoryClient(self.url, file_system, directory_name=directory,
-                                       credential=self._raw_credential,
-                                       _configuration=self._config, _pipeline=self._pipeline,
-                                       _location_mode=self._location_mode, _hosts=self._hosts,
-                                       require_encryption=self.require_encryption,
-                                       key_encryption_key=self.key_encryption_key,
-                                       key_resolver_function=self.key_resolver_function
-                                       )
-
-    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
-                        file_path  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file_system:
-            The file system that the file is in. This can either be the name of the file system,
-            or an instance of FileSystemProperties.
-        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
-        :param file_path:
-            The file with which to interact. This can either be the full path of the file (from the root
-            directory), or an instance of FileProperties, e.g. directory/subdirectory/file.
-        :type file_path: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_datalake_service_samples.py
-                :start-after: [START bsc_get_file_client]
-                :end-before: [END bsc_get_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file_path.name
-        except AttributeError:
-            pass
-
-        return DataLakeFileClient(
-            self.url, file_system, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
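A short sketch of the path-based accessor above (placeholder names). Because the method reduces a FileProperties instance to its .name, either a plain path string or a properties object is accepted:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeServiceClient

    async def main():
        service = DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net", credential="<sas-token>")  # placeholders
        async with service:
            # The path is relative to the file system root.
            file_client = service.get_file_client("my-file-system", "dir/subdir/data.csv")
            props = await file_client.get_file_properties()
            print(props.name, props.size)

    asyncio.run(main())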
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,610 +0,0 @@
-# pylint: disable=too-many-lines
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Dict, TYPE_CHECKING
-)
-
-from azure.core.async_paging import AsyncItemPaged
-
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.storage.blob.aio import ContainerClient
-
-from ._data_lake_file_client_async import DataLakeFileClient
-from ._data_lake_directory_client_async import DataLakeDirectoryClient
-from ._models import PathPropertiesPaged
-from ._data_lake_lease_async import DataLakeLeaseClient
-from .._file_system_client import FileSystemClient as FileSystemClientBase
-from .._generated.aio import DataLakeStorageClient
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from .._shared.policies_async import ExponentialRetry
-from .._models import FileSystemProperties
-
-if TYPE_CHECKING:
-    from .._models import PublicAccess
-    from datetime import datetime
-    from .._models import (  # pylint: disable=unused-import
-        ContentSettings)
-
-
-class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase):
-    """A client to interact with a specific file system, even if that file system
-    may not yet exist.
-
-    For operations relating to a specific directory or file within this file system, a directory client or file client
-    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the URL already has a SAS token, specifying an explicit credential will take priority.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/test_file_system_samples.py
-            :start-after: [START create_file_system_client_from_service]
-            :end-before: [END create_file_system_client_from_service]
-            :language: python
-            :dedent: 8
-            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
-
-        .. literalinclude:: ../samples/test_file_system_samples.py
-            :start-after: [START create_file_system_client_sasurl]
-            :end-before: [END create_file_system_client_sasurl]
-            :language: python
-            :dedent: 8
-            :caption: Creating the FileSystemClient directly.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            file_system_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        super(FileSystemClient, self).__init__(
-            account_url,
-            file_system_name=file_system_name,
-            credential=credential,
-            **kwargs)
-        # to override the class field _container_client sync version
-        kwargs.pop('_hosts', None)
-        self._container_client = ContainerClient(self._blob_account_url, file_system_name,
-                                                 credential=credential,
-                                                 _hosts=self._container_client._hosts,  # pylint: disable=protected-access
-                                                 **kwargs)  # type: ignore # pylint: disable=protected-access
-        self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline)
-        self._loop = kwargs.get('loop', None)
-
-    @distributed_trace_async
-    async def acquire_lease(
-            self, lease_duration=-1,  # type: int
-            lease_id=None,  # type: Optional[str]
-            **kwargs
-    ):
-        # type: (...) -> DataLakeLeaseClient
-        """
-        Requests a new lease. If the file system does not have an active lease,
-        the DataLake service creates a lease on the file system and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The DataLake service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
-        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START acquire_lease_on_file_system]
-                :end-before: [END acquire_lease_on_file_system]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on the file system.
-        """
-        lease = DataLakeLeaseClient(self, lease_id=lease_id)
-        await lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
-
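A minimal sketch of the lease flow this method enables (placeholder endpoint and names):

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            # A finite lease must be between 15 and 60 seconds; -1 never expires.
            lease = await fs.acquire_lease(lease_duration=30)
            try:
                # Lease-guarded operations take the lease explicitly.
                await fs.get_file_system_properties(lease=lease)
            finally:
                await lease.release()

    asyncio.run(main())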
-    async def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
-                                 public_access=None,  # type: Optional[PublicAccess]
-                                 **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Creates a new file system under the specified account.
-
-        If a file system with the same name already exists, a ResourceExistsError will
-        be raised. This method returns a dictionary of the response headers for the
-        newly created file system.
-
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            file system as metadata. Example: `{'Category':'test'}`
-        :type metadata: dict(str, str)
-        :param public_access:
-            Possible values include: file system, file.
-        :type public_access: ~azure.storage.filedatalake.PublicAccess
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A dictionary of response headers (Etag and last modified).
-        :rtype: dict[str, str or ~datetime.datetime]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START create_file_system]
-                :end-before: [END create_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Creating a file system in the datalake service.
-        """
-        return await self._container_client.create_container(metadata=metadata,
-                                                             public_access=public_access,
-                                                             **kwargs)
-
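As the body above shows, creation is delegated to the underlying blob ContainerClient, so the return value is a dict of response headers rather than a new client. A sketch with placeholder names (the exact keys assume the response-headers convention used throughout these clients):

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<account-key>")
        async with fs:
            headers = await fs.create_file_system(metadata={"Category": "test"})
            print(headers["etag"], headers["last_modified"])

    asyncio.run(main())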
-    async def delete_file_system(self, **kwargs):
-        # type: (Any) -> None
-        """Marks the specified file system for deletion.
-
-        The file system and any files contained within it are later deleted during garbage collection.
-        If the file system is not found, a ResourceNotFoundError will be raised.
-
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, delete_file_system only succeeds if the
-            file system's lease is active and matches this ID.
-            Required if the file system has an active lease.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START delete_file_system]
-                :end-before: [END delete_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Deleting a file system in the datalake service.
-        """
-        await self._container_client.delete_container(**kwargs)
-
-    async def get_file_system_properties(self, **kwargs):
-        # type: (Any) -> FileSystemProperties
-        """Returns all user-defined metadata and system properties for the specified
-        file system. The data returned does not include the file system's list of paths.
-
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, get_file_system_properties only succeeds if the
-            file system's lease is active and matches this ID.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The properties of the specified file system, as a FileSystemProperties object.
-        :rtype: ~azure.storage.filedatalake.FileSystemProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_file_system_properties]
-                :end-before: [END get_file_system_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting properties on the file system.
-        """
-        container_properties = await self._container_client.get_container_properties(**kwargs)
-        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
-
-    async def set_file_system_metadata(  # type: ignore
-            self, metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-    ):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        file system. Each call to this operation replaces all existing metadata
-        attached to the file system. To remove all metadata from the file system,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the file system as
-            metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, set_file_system_metadata only succeeds if the
-            file system's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A dictionary of the updated file system properties (Etag and last modified).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START set_file_system_metadata]
-                :end-before: [END set_file_system_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Setting metadata on the file system.
-        """
-        return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
-
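A sketch of the replace-all semantics described above (placeholder names): every call overwrites the full metadata set, so keys omitted from a later call disappear:

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<account-key>")
        async with fs:
            await fs.set_file_system_metadata({"category": "test", "owner": "data-team"})
            await fs.set_file_system_metadata({"category": "archive"})  # 'owner' is now gone
            props = await fs.get_file_system_properties()
            print(props.metadata)  # {'category': 'archive'}

    asyncio.run(main())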
-    def get_paths(self, path=None,  # type: Optional[str]
-                  recursive=True,  # type: Optional[bool]
-                  max_results=None,  # type: Optional[int]
-                  **kwargs):
-        # type: (...) -> AsyncItemPaged[PathProperties]
-        """Returns a generator to list the paths (files or directories) under the specified file system.
-        The generator will lazily follow the continuation tokens returned by
-        the service.
-
-        :param str path:
-            Filters the results to return only paths under the specified path.
-        :param bool recursive:
-            Optional. Set True for recursive listing, False to list only the immediate children.
-        :param int max_results: An optional value that specifies the maximum
-            number of items to return per page. If omitted or greater than 5,000, the
-            response will include up to 5,000 items per page.
-        :keyword upn: Optional. Valid only when Hierarchical Namespace is
-         enabled for the account. If "true", the user identity values returned
-         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-         transformed from Azure Active Directory Object IDs to User Principal
-         Names.  If "false", the values will be returned as Azure Active
-         Directory Object IDs. The default value is false. Note that group and
-         application Object IDs are not translated because they do not have
-         unique friendly names.
-        :type upn: bool
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) response of PathProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_paths_in_file_system]
-                :end-before: [END get_paths_in_file_system]
-                :language: python
-                :dedent: 8
-                :caption: List the paths in the file system.
-        """
-        timeout = kwargs.pop('timeout', None)
-        # Pop upn so it is not also baked into the partial; the page iterator
-        # forwards it explicitly on every service call.
-        upn = kwargs.pop('upn', None)
-        command = functools.partial(
-            self._client.file_system.list_paths,
-            path=path,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command, recursive, path=path, max_results=max_results, upn=upn,
-            page_iterator_class=PathPropertiesPaged, **kwargs)
-
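Because this returns an AsyncItemPaged, results are consumed with async iteration and pages are fetched lazily; a sketch with placeholder names:

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            async for p in fs.get_paths(path="raw/2024", max_results=1000):
                marker = "<dir>" if p.is_directory else p.content_length
                print(p.name, marker)

    asyncio.run(main())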
-    async def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
-                               content_settings=None,  # type: Optional[ContentSettings]
-                               metadata=None,  # type: Optional[Dict[str, str]]
-                               **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Create a new directory in the file system.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        directory_client = self.get_directory_client(directory)
-        await directory_client.create_directory(content_settings=content_settings, metadata=metadata, **kwargs)
-        return directory_client
-
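A sketch of directory creation with the POSIX options documented above (placeholder names; the 0755/0022 values are illustrative and only honored on Hierarchical Namespace accounts):

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            directory = await fs.create_directory(
                "raw/2024", permissions="0755", umask="0022")
            # The returned client can create children directly.
            sub = await directory.create_sub_directory("january")
            print(sub.path_name)

    asyncio.run(main())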
-    async def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
-                               **kwargs):
-        # type: (...) -> DataLakeDirectoryClient
-        """
-        Marks the specified path for deletion.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :keyword lease:
-            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-        """
-        directory_client = self.get_directory_client(directory)
-        await directory_client.delete_directory(**kwargs)
-        return directory_client
-
-    async def create_file(self, file,  # type: Union[FileProperties, str]
-                          **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Create a new file in the file system.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword str permissions: Optional and only valid if Hierarchical Namespace
-         is enabled for the account. Sets POSIX access permissions for the file
-         owner, the file owning group, and others. Each class may be granted
-         read, write, or execute permission.  The sticky bit is also supported.
-         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-         supported.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        await file_client.create_file(**kwargs)
-        return file_client
-
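A sketch pairing this with the returned DataLakeFileClient (placeholder names). In this API version, uploading is an append-then-flush sequence on the file client:

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            file_client = await fs.create_file("raw/2024/data.csv")
            data = b"id,value\n1,42\n"
            # Write at an offset, then commit the total length.
            await file_client.append_data(data, offset=0, length=len(data))
            await file_client.flush_data(len(data))

    asyncio.run(main())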
-    async def delete_file(self, file,  # type: Union[FileProperties, str]
-                          lease=None,  # type: Optional[Union[DataLakeLeaseClient, str]]
-                          **kwargs):
-        # type: (...) -> DataLakeFileClient
-        """
-        Marks the specified file for deletion.
-
-        :param file:
-            The file with which to interact. This can either be the name of the file,
-            or an instance of FileProperties.
-        :type file: str or ~azure.storage.filedatalake.FileProperties
-        :keyword lease:
-            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeFileClient
-        """
-        file_client = self.get_file_client(file)
-        await file_client.delete_file(lease=lease, **kwargs)
-        return file_client
-
-    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
-                             ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified directory.
-
-        The directory need not already exist.
-
-        :param directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_directory_client_from_file_system]
-                :end-before: [END get_directory_client_from_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Getting the directory client to interact with a specific directory.
-        """
-        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory,
-                                       credential=self._raw_credential,
-                                       _configuration=self._config, _pipeline=self._pipeline,
-                                       _location_mode=self._location_mode, _hosts=self._hosts,
-                                       require_encryption=self.require_encryption,
-                                       key_encryption_key=self.key_encryption_key,
-                                       key_resolver_function=self.key_resolver_function,
-                                       loop=self._loop
-                                       )
-
-    def get_file_client(self, file_path  # type: Union[FileProperties, str]
-                        ):
-        # type: (...) -> DataLakeFileClient
-        """Get a client to interact with the specified file.
-
-        The file need not already exist.
-
-        :param file_path:
-            The file with which to interact. This can either be the path of the file (from the root
-            directory), or an instance of FileProperties, e.g. directory/subdirectory/file.
-        :type file_path: str or ~azure.storage.filedatalake.FileProperties
-        :returns: A DataLakeFileClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START get_file_client_from_file_system]
-                :end-before: [END get_file_client_from_file_system]
-                :language: python
-                :dedent: 12
-                :caption: Getting the file client to interact with a specific file.
-        """
-        try:
-            file_path = file_path.name
-        except AttributeError:
-            pass
-
-        return DataLakeFileClient(
-            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            _location_mode=self._location_mode, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function, loop=self._loop)
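A usage sketch (placeholder names). Since only .name is read from a properties object, a PathProperties item from get_paths works here just as well as a path string, even though the docstring names FileProperties:

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            async for p in fs.get_paths(recursive=False):
                if not p.is_directory:
                    file_client = fs.get_file_client(p)
                    props = await file_client.get_file_properties()
                    print(props.name, props.etag)

    asyncio.run(main())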
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,110 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-from azure.core.async_paging import AsyncPageIterator
-from azure.storage.blob.aio._models import ContainerPropertiesPaged
-
-from .._deserialize import return_headers_and_deserialized_path_list, process_storage_error
-from .._generated.models import StorageErrorException, Path
-from .._models import PathProperties
-
-from .._models import FileSystemProperties
-
-
-class FileSystemPropertiesPaged(ContainerPropertiesPaged):
-    """An Iterable of File System properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file system name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only file systems whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of file system names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(FileSystemPropertiesPaged, self).__init__(
-            *args,
-            **kwargs
-        )
-
-    @staticmethod
-    def _build_item(item):
-        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
-
-
-class PathPropertiesPaged(AsyncPageIterator):
-    """An Iterable of Path properties.
-
-    :ivar str path: Filters the results to return only paths under the specified path.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str path: Filters the results to return only paths under the specified path.
-    :param int max_results: The maximum number of paths to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-
-    def __init__(
-            self, command,
-            recursive,
-            path=None,
-            max_results=None,
-            continuation_token=None,
-            upn=None):
-        super(PathPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.recursive = recursive
-        self.results_per_page = max_results
-        self.path = path
-        self.upn = upn
-        self.current_page = None
-        self.path_list = None
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                self.recursive,
-                continuation=continuation_token or None,
-                path=self.path,
-                max_results=self.results_per_page,
-                upn=self.upn,
-                cls=return_headers_and_deserialized_path_list)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.path_list, self._response = get_next_return
-        self.current_page = [self._build_item(item) for item in self.path_list]
-
-        return self._response['continuation'] or None, self.current_page
-
-    @staticmethod
-    def _build_item(item):
-        if isinstance(item, PathProperties):
-            return item
-        if isinstance(item, Path):
-            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
-            return path
-        return item
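The pager above is driven through azure-core's AsyncPageIterator, resuming from the 'continuation' value each response carries. A sketch (placeholder names) of page-level iteration and resuming from a saved token via the by_page interface that AsyncItemPaged exposes:

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        fs = FileSystemClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", credential="<sas-token>")
        async with fs:
            pager = fs.get_paths(max_results=100).by_page()
            async for page in pager:
                async for p in page:
                    print(p.name)
                break  # stop after one page
            token = pager.continuation_token  # opaque; save to resume later
            resumed = fs.get_paths(max_results=100).by_page(continuation_token=token)

    asyncio.run(main())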
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,508 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from azure.storage.blob.aio import BlobClient
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from .._path_client import PathClient as PathClientBase
-from .._models import DirectoryProperties
-from .._generated.aio import DataLakeStorageClient
-from ._data_lake_lease_async import DataLakeLeaseClient
-from .._generated.models import StorageErrorException
-from .._deserialize import process_storage_error
-from .._shared.policies_async import ExponentialRetry
-
-_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
-    'The require_encryption flag is set, but encryption is not supported'
-    ' for this method.')
-
-
-class PathClient(AsyncStorageAccountHostsMixin, PathClientBase):
-    def __init__(
-            self, account_url,  # type: str
-            file_system_name,  # type: str
-            path_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-
-        super(PathClient, self).__init__(account_url, file_system_name, path_name, # type: ignore # pylint: disable=specify-parameter-names-in-call
-                                         credential=credential,
-                                         **kwargs)
-
-        kwargs.pop('_hosts', None)
-        self._blob_client = BlobClient(self._blob_account_url, file_system_name, blob_name=path_name,
-                                       credential=credential, _hosts=self._blob_client._hosts, **kwargs)  # type: ignore # pylint: disable=protected-access
-        self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline)
-        self._loop = kwargs.get('loop', None)
-
-    async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Create directory or file
-
-        :param resource_type:
-            Required for Create File and Create Directory.
-            The value must be "file" or "directory". Possible values include:
-            'directory', 'file'
-        :type resource_type: str
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param metadata:
-            Name-value pairs associated with the file/directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword str umask:
-            Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :keyword permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :type permissions: str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Dict[str, Union[str, datetime]]
-        """
-        options = self._create_path_options(
-            resource_type,
-            content_settings=content_settings,
-            metadata=metadata,
-            **kwargs)
-        try:
-            return await self._client.path.create(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _delete(self, **kwargs):
-        # type: (**Any) -> None
-        """
-        Marks the specified path for deletion.
-
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        options = self._delete_path_options(**kwargs)
-        try:
-            return await self._client.path.delete(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def set_access_control(self, owner=None,  # type: Optional[str]
-                                 group=None,  # type: Optional[str]
-                                 permissions=None,  # type: Optional[str]
-                                 acl=None,  # type: Optional[str]
-                                 **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """
-        Set the owner, group, permissions, or access control list for a path.
-
-        :param owner:
-            Optional. The owner of the file or directory.
-        :type owner: str
-        :param group:
-            Optional. The owning group of the file or directory.
-        :type group: str
-        :param permissions:
-            Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-            permissions and acl are mutually exclusive.
-        :type permissions: str
-        :param acl:
-            Sets POSIX access control rights on files and directories.
-            The value is a comma-separated list of access control entries. Each
-            access control entry (ACE) consists of a scope, a type, a user or
-            group identifier, and permissions in the format
-            "[scope:][type]:[id]:[permissions]".
-            permissions and acl are mutually exclusive.
-        :type acl: str
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: response dict (Etag and last modified).
-        """
-        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
-        try:
-            return await self._client.path.set_access_control(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def get_access_control(self, upn=None,  # type: Optional[bool]
-                                 **kwargs):
-        # type: (...) -> Dict[str, Any]
-        """
-        Get the owner, group, permissions, or access control list for a path.
-
-        :param upn:
-            Optional. Valid only when Hierarchical Namespace is
-            enabled for the account. If "true", the user identity values returned
-            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
-            transformed from Azure Active Directory Object IDs to User Principal
-            Names.  If "false", the values will be returned as Azure Active
-            Directory Object IDs. The default value is false. Note that group and
-            application Object IDs are not translated because they do not have
-            unique friendly names.
-        :type upn: bool
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: response dict.
-        """
-        options = self._get_access_control_options(upn=upn, **kwargs)
-        try:
-            return await self._client.path.get_properties(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
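A sketch of the ACL round trip these two methods support (placeholder names; the ACE string follows the "[scope:][type]:[id]:[permissions]" format described above, granting the owner rwx, the owning group r-x, and others nothing):

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def main():
        directory = DataLakeDirectoryClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            "my-file-system", "raw/2024", credential="<sas-token>")
        async with directory:
            # acl and permissions are mutually exclusive; set one or the other.
            await directory.set_access_control(acl="user::rwx,group::r-x,other::---")
            acl = await directory.get_access_control(upn=True)
            print(acl["acl"], acl["owner"])

    asyncio.run(main())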
-    async def _rename_path(self, rename_source,
-                           **kwargs):
-        # type: (**Any) -> Dict[str, Any]
-        """
-        Rename directory or file
-
-        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
-        :type rename_source: str
-        :param source_lease: A lease ID for the source path. If specified,
-            the source path must have an active lease and the lease ID must
-            match.
-        :type source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set path properties.
-        :param lease:
-            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :param str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
-            When creating a file or directory and the parent folder does not have a default ACL,
-            the umask restricts the permissions of the file or directory to be created.
-            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
-            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
-            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
-            The umask must be specified in 4-digit octal notation (e.g. 0766).
-        :param permissions: Optional and only valid if Hierarchical Namespace
-            is enabled for the account. Sets POSIX access permissions for the file
-            owner, the file owning group, and others. Each class may be granted
-            read, write, or execute permission.  The sticky bit is also supported.
-            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-            supported.
-        :type permissions: str
-        :param ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :param ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :param ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :param int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: response dict.
-        """
-        options = self._rename_path_options(
-            rename_source,
-            **kwargs)
-        try:
-            return await self._client.path.create(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
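The umask rule in the docstring above ("p & ^u") is plain bitwise masking; a
standalone sketch verifying the quoted 0777/0057 -> 0720 example:

    def apply_umask(permission: int, umask: int) -> int:
        # The effective permission keeps only the bits not cleared by the
        # umask: p & ~u (written "p & ^u" in the service documentation).
        return permission & ~umask

    assert apply_umask(0o777, 0o057) == 0o720  # default directory permission
    assert apply_umask(0o666, 0o057) == 0o620  # default file permission
    print(oct(apply_umask(0o777, 0o057)))      # -> 0o720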
-    async def _get_path_properties(self, **kwargs):
-        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file or directory. It does not return the content of the directory or file.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: DirectoryProperties or FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../tests/test_blob_samples_common.py
-                :start-after: [START get_blob_properties]
-                :end-before: [END get_blob_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting the properties for a file/directory.
-        """
-        path_properties = await self._blob_client.get_blob_properties(**kwargs)
-        # Re-brand the returned BlobProperties as DirectoryProperties; the
-        # datalake model mirrors the blob model's attribute layout.
-        path_properties.__class__ = DirectoryProperties
-        return path_properties
-
-    async def set_metadata(self, metadata=None,  # type: Optional[Dict[str, str]]
-                           **kwargs):
-        # type: (...) -> Dict[str, Union[str, datetime]]
-        """Sets one or more user-defined name-value pairs for the specified
-        file system. Each call to this operation replaces all existing metadata
-        attached to the file system. To remove all metadata from the file system,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the file or
-            directory as metadata. Example: {'category':'test'}
-        :type metadata: dict[str, str]
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, the operation only succeeds if the
-            path's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: path-updated property dict (Etag and last modified).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START set_file_system_metadata]
-                :end-before: [END set_file_system_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Setting metadata on the file system.
-        """
-        return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
-
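A hedged usage sketch for replacing path metadata, reusing the hypothetical
async client construction from the earlier example:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def replace_metadata():
        client = DataLakeDirectoryClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            file_system_name="myfilesystem",
            directory_name="mydir",
            credential="<account-key-or-sas>")
        async with client:
            # Each call replaces ALL metadata currently on the path.
            updated = await client.set_metadata({"category": "test"})
            print(updated["etag"], updated["last_modified"])

    asyncio.run(replace_metadata())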
-    async def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
-                               **kwargs):
-        # type: (...) -> Dict[str, Any]
-        """Sets system properties on the file or directory.
-
-        If one property is set for the content_settings, all properties will be overridden.
-
-        :param ~azure.storage.filedatalake.ContentSettings content_settings:
-            ContentSettings object used to set file/directory properties.
-        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
-            If specified, the operation only succeeds if the
-            file/directory's lease is active and matches this ID.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: file/directory-updated property dict (Etag and last modified)
-        :rtype: Dict[str, Any]
-        """
-        return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
-
-    async def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
-                            lease_id=None,  # type: Optional[str]
-                            **kwargs):
-        # type: (...) -> DataLakeLeaseClient
-        """
-        Requests a new lease. If the file or directory does not have an active lease,
-        the DataLake service creates a lease on the file/directory and returns a new
-        lease ID.
-
-        :param int lease_duration:
-            Specifies the duration of the lease, in seconds, or negative one
-            (-1) for a lease that never expires. A non-infinite lease can be
-            between 15 and 60 seconds. A lease duration cannot be changed
-            using renew or change. Default is -1 (infinite lease).
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The DataLake service returns
-            400 (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A DataLakeLeaseClient object that can be used in a context manager.
-        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/test_file_system_samples.py
-                :start-after: [START acquire_lease_on_file_system]
-                :end-before: [END acquire_lease_on_file_system]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on the file system.
-        """
-        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
-        await lease.acquire(lease_duration=lease_duration, **kwargs)
-        return lease
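A sketch of taking a short lease and holding it in an async context manager,
so it is released on exit; all names below are placeholders:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeFileClient

    async def lease_file():
        client = DataLakeFileClient(
            "https://myaccount.dfs.core.windows.net",  # placeholder
            file_system_name="myfilesystem",
            file_path="mydir/myfile.txt",
            credential="<account-key-or-sas>")
        async with client:
            lease = await client.acquire_lease(lease_duration=15)
            async with lease:
                # Mutating operations must now pass the lease.
                await client.set_metadata({"locked": "true"}, lease=lease)

    asyncio.run(lease_file())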
diff -pruN 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini
--- 1.4.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-[mypy]
-python_version = 3.6
-warn_return_any = True
-warn_unused_configs = True
-ignore_missing_imports = True
-
-# Per-module options:
-
-[mypy-azure.storage.filedatalake._generated.*]
-ignore_errors = True
-
-[mypy-azure.core.*]
-ignore_errors = True
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/__init__.py	2025-06-18 05:27:42.000000000 +0000
@@ -1 +1 @@
-﻿__import__('pkg_resources').declare_namespace(__name__)
+﻿
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._version import VERSION
-from ._file_client import ShareFileClient
-from ._directory_client import ShareDirectoryClient
-from ._share_client import ShareClient
-from ._share_service_client import ShareServiceClient
-from ._lease import ShareLeaseClient
-from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas
-from ._shared.policies import ExponentialRetry, LinearRetry
-from ._shared.models import (
-    LocationMode,
-    ResourceTypes,
-    AccountSasPermissions,
-    StorageErrorCode)
-from ._models import (
-    ShareProperties,
-    DirectoryProperties,
-    Handle,
-    FileProperties,
-    Metrics,
-    RetentionPolicy,
-    CorsRule,
-    AccessPolicy,
-    FileSasPermissions,
-    ShareSasPermissions,
-    ContentSettings,
-    NTFSAttributes)
-from ._generated.models import (
-    HandleItem
-)
-
-__version__ = VERSION
-
-
-__all__ = [
-    'ShareFileClient',
-    'ShareDirectoryClient',
-    'ShareClient',
-    'ShareServiceClient',
-    'ShareLeaseClient',
-    'ExponentialRetry',
-    'LinearRetry',
-    'LocationMode',
-    'ResourceTypes',
-    'AccountSasPermissions',
-    'StorageErrorCode',
-    'Metrics',
-    'RetentionPolicy',
-    'CorsRule',
-    'AccessPolicy',
-    'FileSasPermissions',
-    'ShareSasPermissions',
-    'ShareProperties',
-    'DirectoryProperties',
-    'FileProperties',
-    'ContentSettings',
-    'Handle',
-    'NTFSAttributes',
-    'HandleItem',
-    'generate_account_sas',
-    'generate_share_sas',
-    'generate_file_sas'
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_deserialize.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_deserialize.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_deserialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_deserialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,64 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._models import ShareProperties, DirectoryProperties, FileProperties
-from ._shared.response_handlers import deserialize_metadata
-
-
-def deserialize_share_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    share_properties = ShareProperties(
-        metadata=metadata,
-        **headers
-    )
-    return share_properties
-
-
-def deserialize_directory_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    directory_properties = DirectoryProperties(
-        metadata=metadata,
-        **headers
-    )
-    return directory_properties
-
-
-def deserialize_file_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    file_properties = FileProperties(
-        metadata=metadata,
-        **headers
-    )
-    if 'Content-Range' in headers:
-        if 'x-ms-content-md5' in headers:
-            file_properties.content_settings.content_md5 = headers['x-ms-content-md5']
-        else:
-            file_properties.content_settings.content_md5 = None
-    return file_properties
-
-
-def deserialize_file_stream(response, obj, headers):
-    file_properties = deserialize_file_properties(response, obj, headers)
-    obj.properties = file_properties
-    return response.location_mode, obj
-
-
-def deserialize_permission(response, obj, headers):  # pylint: disable=unused-argument
-    '''
-    Extracts the file permission.
-    '''
-
-    return obj.permission
-
-
-def deserialize_permission_key(response, obj, headers):  # pylint: disable=unused-argument
-    '''
-    Extracts the file permission key.
-    '''
-
-    if response is None or headers is None:
-        return None
-    return headers.get('x-ms-file-permission-key', None)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_directory_client.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_directory_client.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_directory_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_directory_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,706 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-import time
-from typing import (  # pylint: disable=unused-import
-    Optional, Union, Any, Dict, TYPE_CHECKING
-)
-
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-from azure.core.paging import ItemPaged
-from azure.core.pipeline import Pipeline
-from azure.core.tracing.decorator import distributed_trace
-
-from ._generated import AzureFileStorage
-from ._generated.version import VERSION
-from ._generated.models import StorageErrorException
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.request_handlers import add_metadata_headers
-from ._shared.response_handlers import return_response_headers, process_storage_error
-from ._shared.parser import _str
-from ._parser import _get_file_permission, _datetime_to_str
-from ._deserialize import deserialize_directory_properties
-from ._serialize import get_api_version
-from ._file_client import ShareFileClient
-from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes  # pylint: disable=unused-import
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from ._models import ShareProperties, DirectoryProperties, ContentSettings
-    from ._generated.models import HandleItem
-
-
-class ShareDirectoryClient(StorageAccountHostsMixin):
-    """A client to interact with a specific directory, although it may not yet exist.
-
-    For operations relating to a specific subdirectory or file in this share, the clients for those
-    entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the directory,
-        use the :func:`from_directory_url` classmethod.
-    :param share_name:
-        The name of the share for the directory.
-    :type share_name: str
-    :param str directory_path:
-        The directory path for the directory with which to interact.
-        If specified, this value will override a directory value specified in the directory URL.
-    :param str snapshot:
-        An optional share snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`ShareClient.create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-    def __init__( # type: ignore
-            self, account_url,  # type: str
-            share_name, # type: str
-            directory_path, # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None, # type: Optional[Any]
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not share_name:
-            raise ValueError("Please specify a share name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-        if hasattr(credential, 'get_token'):
-            raise ValueError("Token credentials not supported by the File service.")
-
-        path_snapshot, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError(
-                'You need to provide either an account shared key or SAS token when creating a storage service.')
-        try:
-            self.snapshot = snapshot.snapshot # type: ignore
-        except AttributeError:
-            try:
-                self.snapshot = snapshot['snapshot'] # type: ignore
-            except TypeError:
-                self.snapshot = snapshot or path_snapshot
-
-        self.share_name = share_name
-        self.directory_path = directory_path
-
-        self._query_str, credential = self._format_query_string(
-            sas_token, credential, share_snapshot=self.snapshot)
-        super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    @classmethod
-    def from_directory_url(cls, directory_url,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None, # type: Optional[Any]
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> ShareDirectoryClient
-        """Create a ShareDirectoryClient from a directory url.
-
-        :param str directory_url:
-            The full URI to the directory.
-        :param str snapshot:
-            An optional share snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`ShareClient.create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A directory client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-        """
-        try:
-            if not directory_url.lower().startswith('http'):
-                directory_url = "https://" + directory_url
-        except AttributeError:
-            raise ValueError("Directory URL must be a string.")
-        parsed_url = urlparse(directory_url.rstrip('/'))
-        if not parsed_url.path and not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(directory_url))
-        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
-        path_snapshot, _ = parse_query(parsed_url.query)
-
-        share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/')
-        share_name = unquote(share_name)
-
-        directory_path = path_dir
-        snapshot = snapshot or path_snapshot
-
-        return cls(
-            account_url=account_url, share_name=share_name, directory_path=directory_path,
-            credential=credential, **kwargs)
-
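A sketch of building a client straight from a directory URL; the URL below is
hypothetical and carries its SAS token in the query string, so no separate
credential is needed (using upstream azure.storage.fileshare, whose client
matches this removed copy):

    from azure.storage.fileshare import ShareDirectoryClient

    url = ("https://myaccount.file.core.windows.net/myshare/parent/child"
           "?sv=2019-07-07&sig=<signature>")
    client = ShareDirectoryClient.from_directory_url(url)
    print(client.share_name, client.directory_path)  # myshare parent/child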
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        share_name = self.share_name
-        if isinstance(share_name, six.text_type):
-            share_name = share_name.encode('UTF-8')
-        directory_path = ""
-        if self.directory_path:
-            directory_path = "/" + quote(self.directory_path, safe='~')
-        return "{}://{}/{}{}{}".format(
-            self.scheme,
-            hostname,
-            quote(share_name),
-            directory_path,
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            share_name,  # type: str
-            directory_path,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareDirectoryClient
-        """Create ShareDirectoryClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param share_name: The name of the share.
-        :type share_name: str
-        :param str directory_path:
-            The directory path.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A directory client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
-
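And the connection-string variant, again with placeholder values:

    from azure.storage.fileshare import ShareDirectoryClient

    conn_str = ("DefaultEndpointsProtocol=https;AccountName=myaccount;"
                "AccountKey=<base64-key>;EndpointSuffix=core.windows.net")
    client = ShareDirectoryClient.from_connection_string(
        conn_str, share_name="myshare", directory_path="parent/child")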
-    def get_file_client(self, file_name, **kwargs):
-        # type: (str, Any) -> ShareFileClient
-        """Get a client to interact with a specific file.
-
-        The file need not already exist.
-
-        :param file_name:
-            The name of the file.
-        :returns: A File Client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        if self.directory_path:
-            file_name = self.directory_path.rstrip('/') + "/" + file_name
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareFileClient(
-            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version,
-            _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs)
-
-    def get_subdirectory_client(self, directory_name, **kwargs):
-        # type: (str, Any) -> ShareDirectoryClient
-        """Get a client to interact with a specific subdirectory.
-
-        The subdirectory need not already exist.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :returns: A Directory Client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START get_subdirectory_client]
-                :end-before: [END get_subdirectory_client]
-                :language: python
-                :dedent: 12
-                :caption: Gets the subdirectory client.
-        """
-        directory_path = self.directory_path.rstrip('/') + "/" + directory_name
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareDirectoryClient(
-            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
-            _location_mode=self._location_mode, **kwargs)
-
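A sketch of navigating the share tree with the two accessors above; the child
clients reuse the parent's transport, so no new connection is set up
(connection string is a placeholder):

    from azure.storage.fileshare import ShareDirectoryClient

    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="parent")
    sub = client.get_subdirectory_client("logs")   # parent/logs
    f = sub.get_file_client("app.txt")             # parent/logs/app.txt
    print(sub.directory_path, f.file_name)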
-    @distributed_trace
-    def create_directory(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Creates a new directory under the directory referenced by the client.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the directory as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Directory-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START create_directory]
-                :end-before: [END create_directory]
-                :language: python
-                :dedent: 12
-                :caption: Creates a directory.
-        """
-        timeout = kwargs.pop('timeout', None)
-        metadata = kwargs.pop('metadata', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return self._client.directory.create( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def delete_directory(self, **kwargs):
-        # type: (**Any) -> None
-        """Marks the directory for deletion. The directory is
-        later deleted during garbage collection.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START delete_directory]
-                :end-before: [END delete_directory]
-                :language: python
-                :dedent: 12
-                :caption: Deletes a directory.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.directory.delete(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_directories_and_files(self, name_starts_with=None, **kwargs):
-        # type: (Optional[str], **Any) -> ItemPaged
-        """Lists all the directories and files under the directory.
-
-        :param str name_starts_with:
-            Filters the results to return only entities whose names
-            begin with the specified prefix.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
-        :rtype: ~azure.core.paging.ItemPaged[Union[DirectoryProperties, FileProperties]]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START lists_directory]
-                :end-before: [END lists_directory]
-                :language: python
-                :dedent: 12
-                :caption: List directories and files.
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.directory.list_files_and_directories_segment,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=DirectoryPropertiesPaged)
-
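A sketch of walking the listing; items are dict-like DirectoryProperties or
FileProperties objects, distinguishable by type (connection string is a
placeholder):

    from azure.storage.fileshare import DirectoryProperties, ShareDirectoryClient

    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="parent")
    for item in client.list_directories_and_files(name_starts_with="20"):
        kind = "dir " if isinstance(item, DirectoryProperties) else "file"
        print(kind, item.name)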
-    @distributed_trace
-    def list_handles(self, recursive=False, **kwargs):
-        # type: (bool, Any) -> ItemPaged
-        """Lists opened handles on a directory or a file under the directory.
-
-        :param bool recursive:
-            Boolean that specifies if the operation should apply to the directory specified by the client,
-            its files, its subdirectories and their files. Default value is False.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of HandleItem
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem]
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.directory.list_handles,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            recursive=recursive,
-            **kwargs)
-        return ItemPaged(
-            command, results_per_page=results_per_page,
-            page_iterator_class=HandlesPaged)
-
-    @distributed_trace
-    def close_handle(self, handle, **kwargs):
-        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
-        """Close an open file handle.
-
-        :param handle:
-            A specific handle to close.
-        :type handle: str or ~azure.storage.fileshare.Handle
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            The number of handles closed (this may be 0 if the specified handle was not found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        try:
-            handle_id = handle.id # type: ignore
-        except AttributeError:
-            handle_id = handle
-        if handle_id == '*':
-            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
-        try:
-            response = self._client.directory.force_close_handles(
-                handle_id,
-                marker=None,
-                recursive=None,
-                sharesnapshot=self.snapshot,
-                cls=return_response_headers,
-                **kwargs
-            )
-            return {
-                'closed_handles_count': response.get('number_of_handles_closed', 0),
-                'failed_handles_count': response.get('number_of_handles_failed', 0)
-            }
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def close_all_handles(self, recursive=False, **kwargs):
-        # type: (bool, Any) -> Dict[str, int]
-        """Close any open file handles.
-
-        This operation will block until the service has closed all open handles.
-
-        :param bool recursive:
-            Boolean that specifies if the operation should apply to the directory specified by the client,
-            its files, its subdirectories and their files. Default value is False.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        timeout = kwargs.pop('timeout', None)
-        start_time = time.time()
-
-        try_close = True
-        continuation_token = None
-        total_closed = 0
-        total_failed = 0
-        while try_close:
-            try:
-                response = self._client.directory.force_close_handles(
-                    handle_id='*',
-                    timeout=timeout,
-                    marker=continuation_token,
-                    recursive=recursive,
-                    sharesnapshot=self.snapshot,
-                    cls=return_response_headers,
-                    **kwargs
-                )
-            except StorageErrorException as error:
-                process_storage_error(error)
-            continuation_token = response.get('marker')
-            try_close = bool(continuation_token)
-            total_closed += response.get('number_of_handles_closed', 0)
-            total_failed += response.get('number_of_handles_failed', 0)
-            if timeout:
-                timeout = max(0, timeout - (time.time() - start_time))
-        return {
-            'closed_handles_count': total_closed,
-            'failed_handles_count': total_failed
-        }
-
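A sketch of force-closing every handle under a directory tree and reading the
two counters accumulated by the loop above (connection string is a
placeholder):

    from azure.storage.fileshare import ShareDirectoryClient

    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="parent")
    # Blocks until the service stops returning a continuation marker.
    result = client.close_all_handles(recursive=True)
    print(result["closed_handles_count"], result["failed_handles_count"])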
-    @distributed_trace
-    def get_directory_properties(self, **kwargs):
-        # type: (Any) -> DirectoryProperties
-        """Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: DirectoryProperties
-        :rtype: ~azure.storage.fileshare.DirectoryProperties
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = self._client.directory.get_properties(
-                timeout=timeout,
-                cls=deserialize_directory_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response # type: ignore
-
-    @distributed_trace
-    def set_directory_metadata(self, metadata, **kwargs):
-        # type: (Dict[str, Any], Any) ->  Dict[str, Any]
-        """Sets the metadata for the directory.
-
-        Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with an empty metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Directory-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        try:
-            return self._client.directory.set_metadata( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def set_http_headers(self, file_attributes="none",  # type: Union[str, NTFSAttributes]
-                         file_creation_time="preserve",  # type: Union[str, datetime]
-                         file_last_write_time="preserve",  # type: Union[str, datetime]
-                         file_permission=None,   # type: Optional[str]
-                         permission_key=None,   # type: Optional[str]
-                         **kwargs  # type: Any
-                         ):
-        # type: (...) -> Dict[str, Any]
-        """Sets HTTP headers on the directory.
-
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, indicates preservation of existing values.
-            For example, when given as a str: 'Temporary|Archive'.
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file.
-            Default value: Preserve.
-        :type file_creation_time: str or datetime
-        :param file_last_write_time: Last write time for the file.
-            Default value: Preserve.
-        :type file_last_write_time: str or datetime
-        :param file_permission: If specified, the permission (security
-            descriptor) will be set for the directory/file. This header can be
-            used if the permission size is <= 8KB; otherwise the
-            x-ms-file-permission-key header must be used. Default value: Inherit.
-            If SDDL is specified as input, it must have owner, group and dacl.
-            Note: Only one of x-ms-file-permission or x-ms-file-permission-key
-            should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
-        try:
-            return self._client.directory.set_properties(  # type: ignore
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
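A sketch of stamping NTFS attributes on a directory; the attribute string is
the illustrative value from the docstring above, and the connection string is
a placeholder:

    from azure.storage.fileshare import ShareDirectoryClient

    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="parent")
    # Pipe-separated str form of NTFSAttributes; creation and last-write
    # times are left at their "preserve" defaults.
    client.set_http_headers(file_attributes="Temporary|Archive")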
-    @distributed_trace
-    def create_subdirectory(
-            self, directory_name,  # type: str
-            **kwargs):
-        # type: (...) -> ShareDirectoryClient
-        """Creates a new subdirectory and returns a client to interact
-        with the subdirectory.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the subdirectory as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: ShareDirectoryClient
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START create_subdirectory]
-                :end-before: [END create_subdirectory]
-                :language: python
-                :dedent: 12
-                :caption: Create a subdirectory.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        subdir = self.get_subdirectory_client(directory_name)
-        subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
-        return subdir # type: ignore
-
-    @distributed_trace
-    def delete_subdirectory(
-            self, directory_name,  # type: str
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Deletes a subdirectory.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START delete_subdirectory]
-                :end-before: [END delete_subdirectory]
-                :language: python
-                :dedent: 12
-                :caption: Delete a subdirectory.
-        """
-        timeout = kwargs.pop('timeout', None)
-        subdir = self.get_subdirectory_client(directory_name)
-        subdir.delete_directory(timeout=timeout, **kwargs)
-
-    @distributed_trace
-    def upload_file(
-            self, file_name,  # type: str
-            data, # type: Any
-            length=None, # type: Optional[int]
-            **kwargs # type: Any
-        ):
-        # type: (...) -> ShareFileClient
-        """Creates a new file in the directory and returns a ShareFileClient
-        to interact with the file.
-
-        :param str file_name:
-            The name of the file.
-        :param Any data:
-            Content of the file.
-        :param int length:
-            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https, as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: ShareFileClient
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START upload_file_to_directory]
-                :end-before: [END upload_file_to_directory]
-                :language: python
-                :dedent: 12
-                :caption: Upload a file to a directory.
-        """
-        file_client = self.get_file_client(file_name)
-        file_client.upload_file(
-            data,
-            length=length,
-            **kwargs)
-        return file_client # type: ignore
-
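A sketch of the upload helper with in-memory bytes; it returns a
ShareFileClient bound to the new file (names are placeholders):

    from azure.storage.fileshare import ShareDirectoryClient

    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="parent")
    data = b"hello, share"
    file_client = client.upload_file("hello.txt", data, length=len(data))
    print(file_client.file_name)  # hello.txt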
-    @distributed_trace
-    def delete_file(
-            self, file_name,  # type: str
-            **kwargs  # type: Optional[Any]
-        ):
-        # type: (...) -> None
-        """Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str file_name:
-            The name of the file to delete.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START delete_file_in_directory]
-                :end-before: [END delete_file_in_directory]
-                :language: python
-                :dedent: 12
-                :caption: Delete a file in a directory.
-        """
-        file_client = self.get_file_client(file_name)
-        file_client.delete_file(**kwargs)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_download.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_download.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_download.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_download.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,522 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-import threading
-import warnings
-from io import BytesIO
-
-from azure.core.exceptions import HttpResponseError
-from azure.core.tracing.common import with_current_context
-from ._shared.encryption import decrypt_blob
-from ._shared.request_handlers import validate_and_format_range_headers
-from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
-
-
-def process_range_and_offset(start_range, end_range, length, encryption):
-    start_offset, end_offset = 0, 0
-    if encryption.get("key") is not None or encryption.get("resolver") is not None:
-        if start_range is not None:
-            # Align the start of the range along a 16 byte block
-            start_offset = start_range % 16
-            start_range -= start_offset
-
-            # Include an extra 16 bytes for the IV if necessary
-            # Because of the previous offsetting, start_range will always
-            # be a multiple of 16.
-            if start_range > 0:
-                start_offset += 16
-                start_range -= 16
-
-        if length is not None:
-            # Align the end of the range along a 16 byte block
-            end_offset = 15 - (end_range % 16)
-            end_range += end_offset
-
-    return (start_range, end_range), (start_offset, end_offset)
-
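The alignment above is easiest to see on concrete numbers; a standalone sketch
of the same arithmetic for an encrypted download of bytes 20-40 (AES-CBC works
on 16-byte blocks and needs the preceding block as the IV):

    def align_for_decryption(start, end):
        # Mirrors process_range_and_offset when encryption is enabled.
        start_offset = start % 16         # distance into the first block
        start -= start_offset
        if start > 0:                     # fetch one extra block to act as IV
            start_offset += 16
            start -= 16
        end_offset = 15 - (end % 16)      # pad the end to a block boundary
        end += end_offset
        return (start, end), (start_offset, end_offset)

    # Requesting bytes 20..40 actually downloads 0..47, then trims 20 bytes
    # from the front and 7 from the back after decryption.
    assert align_for_decryption(20, 40) == ((0, 47), (20, 7))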
-
-def process_content(data, start_offset, end_offset, encryption):
-    if data is None:
-        raise ValueError("Response cannot be None.")
-    try:
-        content = b"".join(list(data))
-    except Exception as error:
-        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
-    if content and encryption.get("key") is not None or encryption.get("resolver") is not None:
-        try:
-            return decrypt_blob(
-                encryption.get("required"),
-                encryption.get("key"),
-                encryption.get("resolver"),
-                content,
-                start_offset,
-                end_offset,
-                data.response.headers,
-            )
-        except Exception as error:
-            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
-    return content
-
-
-class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        client=None,
-        total_size=None,
-        chunk_size=None,
-        current_progress=None,
-        start_range=None,
-        end_range=None,
-        stream=None,
-        parallel=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs
-    ):
-        self.client = client
-
-        # Information on the download range/chunk size
-        self.chunk_size = chunk_size
-        self.total_size = total_size
-        self.start_index = start_range
-        self.end_index = end_range
-
-        # The destination that we will write to
-        self.stream = stream
-        self.stream_lock = threading.Lock() if parallel else None
-        self.progress_lock = threading.Lock() if parallel else None
-
-        # For a parallel download, the stream is always seekable, so we note down the current position
-        # in order to seek to the right place when out-of-order chunks come in
-        self.stream_start = stream.tell() if parallel else None
-
-        # Download progress so far
-        self.progress_total = current_progress
-
-        # Encryption
-        self.encryption_options = encryption_options
-
-        # Parameters for each get operation
-        self.validate_content = validate_content
-        self.request_options = kwargs
-
-    def _calculate_range(self, chunk_start):
-        if chunk_start + self.chunk_size > self.end_index:
-            chunk_end = self.end_index
-        else:
-            chunk_end = chunk_start + self.chunk_size
-        return chunk_start, chunk_end
-
-    def get_chunk_offsets(self):
-        index = self.start_index
-        while index < self.end_index:
-            yield index
-            index += self.chunk_size
-
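The generator above is a plain stride over the requested range; a tiny
standalone sketch of the same walk:

    def chunk_offsets(start_index, end_index, chunk_size):
        # Same iteration as _ChunkDownloader.get_chunk_offsets.
        index = start_index
        while index < end_index:
            yield index
            index += chunk_size

    # A 10-byte range in 4-byte chunks starts chunks at offsets 0, 4 and 8;
    # _calculate_range later clamps the final chunk to end_index.
    assert list(chunk_offsets(0, 10, 4)) == [0, 4, 8]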
-    def process_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def yield_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        return self._download_chunk(chunk_start, chunk_end - 1)
-
-    def _update_progress(self, length):
-        if self.progress_lock:
-            with self.progress_lock:  # pylint: disable=not-context-manager
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        if self.stream_lock:
-            with self.stream_lock:  # pylint: disable=not-context-manager
-                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-                self.stream.write(chunk_data)
-        else:
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        download_range, offset = process_range_and_offset(
-            chunk_start, chunk_end, chunk_end, self.encryption_options
-        )
-        range_header, range_validation = validate_and_format_range_headers(
-            download_range[0], download_range[1], check_content_md5=self.validate_content
-        )
-
-        try:
-            _, response = self.client.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self.validate_content,
-                data_stream_total=self.total_size,
-                download_stream_current=self.progress_total,
-                **self.request_options
-            )
-        except HttpResponseError as error:
-            process_storage_error(error)
-
-        chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
-        return chunk_data
-
-
-class _ChunkIterator(object):
-    """Async iterator for chunks in blob download stream."""
-
-    def __init__(self, size, content, downloader):
-        self.size = size
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            chunk = next(self._iter_chunks)
-            self._current_content = self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-    next = __next__  # Python 2 compatibility.
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar str path:
-        The full path of the file.
-    :ivar str share:
-        The name of the share where the file is.
-    :ivar ~azure.storage.fileshare.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
-    """
-
-    def __init__(
-        self,
-        client=None,
-        config=None,
-        start_range=None,
-        end_range=None,
-        validate_content=None,
-        encryption_options=None,
-        max_concurrency=1,
-        name=None,
-        path=None,
-        share=None,
-        encoding=None,
-        **kwargs
-    ):
-        self.name = name
-        self.path = path
-        self.share = share
-        self.properties = None
-        self.size = None
-
-        self._client = client
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self._config.max_chunk_get_size for the first
-        # chunk so a transactional MD5 can be retrieved.
-        self._first_get_size = (
-            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
-        )
-        initial_request_start = self._start_range if self._start_range is not None else 0
-        if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size:
-            initial_request_end = self._end_range
-        else:
-            initial_request_end = initial_request_start + self._first_get_size - 1
-
-        self._initial_range, self._initial_offset = process_range_and_offset(
-            initial_request_start, initial_request_end, self._end_range, self._encryption_options
-        )
-
-        self._response = self._initial_request()
-        self.properties = self._response.properties
-        self.properties.name = self.name
-        self.properties.path = self.path
-        self.properties.share = self.share
-
-        # Set the content length to the download size instead of the size of
-        # the last range
-        self.properties.size = self.size
-
-        # Overwrite the content range to the user requested range
-        self.properties.content_range = "bytes {0}-{1}/{2}".format(
-            self._start_range,
-            self._end_range,
-            self._file_size
-        )
-
-        # Overwrite the content MD5 as it is the MD5 for the last range instead
-        # of the stored MD5
-        # TODO: Set to the stored MD5 when the service returns this
-        self.properties.content_md5 = None
-
-        if self.size == 0:
-            self._current_content = b""
-        else:
-            self._current_content = process_content(
-                self._response,
-                self._initial_offset[0],
-                self._initial_offset[1],
-                self._encryption_options
-            )
-
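A worked sketch of the first-request sizing above; the byte values are assumed defaults, not guaranteed configuration:

    max_single_get_size = 32 * 1024 * 1024  # assumed default
    max_chunk_get_size = 4 * 1024 * 1024    # assumed default

    # With validation on, the first GET shrinks to one chunk so the
    # service's transactional MD5 (ranges under 4MB only) can cover it.
    validate_content = True
    first_get_size = max_chunk_get_size if validate_content else max_single_get_size
    initial_range = (0, first_get_size - 1)  # end index is inclusive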
-    def __len__(self):
-        return self.size
-
-    def _initial_request(self):
-        range_header, range_validation = validate_and_format_range_headers(
-            self._initial_range[0],
-            self._initial_range[1],
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=self._validate_content
-        )
-
-        try:
-            location_mode, response = self._client.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self._validate_content,
-                data_stream_total=None,
-                download_stream_current=0,
-                **self._request_options
-            )
-
-            # Check the location we read from to ensure we use the same one
-            # for subsequent requests.
-            self._location_mode = location_mode
-
-            # Parse the total file size and adjust the download size if ranges
-            # were specified
-            self._file_size = parse_length_from_content_range(response.properties.content_range)
-            if self._end_range is not None:
-                # Use the end range index unless it is over the end of the file
-                self.size = min(self._file_size, self._end_range - self._start_range + 1)
-            elif self._start_range is not None:
-                self.size = self._file_size - self._start_range
-            else:
-                self.size = self._file_size
-
-        except HttpResponseError as error:
-            if self._start_range is None and error.response.status_code == 416:
-                # Get range will fail on an empty file. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                try:
-                    _, response = self._client.download(
-                        validate_content=self._validate_content,
-                        data_stream_total=0,
-                        download_stream_current=0,
-                        **self._request_options
-                    )
-                except HttpResponseError as error:
-                    process_storage_error(error)
-
-                # Set the download size to empty
-                self.size = 0
-                self._file_size = 0
-            else:
-                process_storage_error(error)
-
-        # If the file is small, the download is complete at this point.
-        # If file size is large, download the rest of the file in chunks.
-        if response.properties.size == self.size:
-            self._download_complete = True
-        return response
-
-    def chunks(self):
-        if self.size == 0 or self._download_complete:
-            iter_downloader = None
-        else:
-            data_end = self._file_size
-            if self._end_range is not None:
-                # Use the end range index unless it is over the end of the file
-                data_end = min(self._file_size, self._end_range + 1)
-            iter_downloader = _ChunkDownloader(
-                client=self._client,
-                total_size=self.size,
-                chunk_size=self._config.max_chunk_get_size,
-                current_progress=self._first_get_size,
-                start_range=self._initial_range[1] + 1,  # start where the first download ended
-                end_range=data_end,
-                stream=None,
-                parallel=False,
-                validate_content=self._validate_content,
-                encryption_options=self._encryption_options,
-                use_location=self._location_mode,
-                **self._request_options
-            )
-        return _ChunkIterator(
-            size=self.size,
-            content=self._current_content,
-            downloader=iter_downloader)
-
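A hypothetical usage sketch for chunks(), assuming an existing ShareFileClient named file_client (its download_file method, defined later in this file, returns this downloader):

    downloader = file_client.download_file()
    with open("local_copy.bin", "wb") as handle:  # placeholder path
        for chunk in downloader.chunks():
            handle.write(chunk)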
-    def readall(self):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-        :rtype: bytes or str
-        """
-        stream = BytesIO()
-        self.readinto(stream)
-        data = stream.getvalue()
-        if self._encoding:
-            return data.decode(self._encoding)
-        return data
-
-    def content_as_bytes(self, max_concurrency=1):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :rtype: bytes
-        """
-        warnings.warn(
-            "content_as_bytes is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        return self.readall()
-
-    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
-        """Download the contents of this file, and decode as text.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :param str encoding:
-            Text encoding used to decode the downloaded bytes. Default is UTF-8.
-        :rtype: str
-        """
-        warnings.warn(
-            "content_as_text is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        return self.readall()
-
-    def readinto(self, stream):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        # The stream must be seekable if parallel download is required
-        parallel = self._max_concurrency > 1
-        if parallel:
-            error_message = "Target stream handle must be seekable."
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(error_message)
-
-            try:
-                stream.seek(stream.tell())
-            except (NotImplementedError, AttributeError):
-                raise ValueError(error_message)
-
-        # Write the content to the user stream
-        stream.write(self._current_content)
-        if self._download_complete:
-            return self.size
-
-        data_end = self._file_size
-        if self._end_range is not None:
-            # Use the length unless it is over the end of the file
-            data_end = min(self._file_size, self._end_range + 1)
-
-        downloader = _ChunkDownloader(
-            client=self._client,
-            total_size=self.size,
-            chunk_size=self._config.max_chunk_get_size,
-            current_progress=self._first_get_size,
-            start_range=self._initial_range[1] + 1,  # Start where the first download ended
-            end_range=data_end,
-            stream=stream,
-            parallel=parallel,
-            validate_content=self._validate_content,
-            encryption_options=self._encryption_options,
-            use_location=self._location_mode,
-            **self._request_options
-        )
-        if parallel:
-            import concurrent.futures
-            executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency)
-            list(executor.map(
-                    with_current_context(downloader.process_chunk),
-                    downloader.get_chunk_offsets()
-                ))
-        else:
-            for chunk in downloader.get_chunk_offsets():
-                downloader.process_chunk(chunk)
-        return self.size
-
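A hedged example of a parallel readinto, again assuming an existing file_client; with max_concurrency above 1 the target stream must be seekable, because the thread pool above may complete chunks out of order:

    downloader = file_client.download_file(max_concurrency=4)  # illustrative value
    with open("large_file.bin", "wb") as handle:
        bytes_read = downloader.readinto(handle)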
-    def download_to_stream(self, stream, max_concurrency=1):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The properties of the downloaded file.
-        :rtype: Any
-        """
-        warnings.warn(
-            "download_to_stream is deprecated, use readinto instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self.readinto(stream)
-        return self.properties
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_file_client.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_file_client.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_file_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_file_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1328 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-many-lines
-import functools
-import time
-from io import BytesIO
-from typing import ( # pylint: disable=unused-import
-    Optional, Union, IO, List, Dict, Any, Iterable,
-    TYPE_CHECKING
-)
-
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-from azure.core.paging import ItemPaged  # pylint: disable=ungrouped-imports
-from azure.core.tracing.decorator import distributed_trace
-
-from ._generated import AzureFileStorage
-from ._generated.version import VERSION
-from ._generated.models import StorageErrorException, FileHTTPHeaders
-from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks
-from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
-from ._shared.request_handlers import add_metadata_headers, get_length
-from ._shared.response_handlers import return_response_headers, process_storage_error
-from ._shared.parser import _str
-from ._parser import _get_file_permission, _datetime_to_str
-from ._lease import ShareLeaseClient
-from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version
-from ._deserialize import deserialize_file_properties, deserialize_file_stream
-from ._models import HandlesPaged, NTFSAttributes  # pylint: disable=unused-import
-from ._download import StorageStreamDownloader
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from ._models import ShareProperties, ContentSettings, FileProperties, Handle
-    from ._generated.models import HandleItem
-
-
-def _upload_file_helper(
-        client,
-        stream,
-        size,
-        metadata,
-        content_settings,
-        validate_content,
-        timeout,
-        max_concurrency,
-        file_settings,
-        file_attributes="none",
-        file_creation_time="now",
-        file_last_write_time="now",
-        file_permission=None,
-        file_permission_key=None,
-        **kwargs):
-    try:
-        if size is None or size < 0:
-            raise ValueError("A content size must be specified for a File.")
-        response = client.create_file(
-            size,
-            content_settings=content_settings,
-            metadata=metadata,
-            timeout=timeout,
-            file_attributes=file_attributes,
-            file_creation_time=file_creation_time,
-            file_last_write_time=file_last_write_time,
-            file_permission=file_permission,
-            permission_key=file_permission_key,
-            **kwargs
-        )
-        if size == 0:
-            return response
-
-        responses = upload_data_chunks(
-            service=client,
-            uploader_class=FileChunkUploader,
-            total_size=size,
-            chunk_size=file_settings.max_range_size,
-            stream=stream,
-            max_concurrency=max_concurrency,
-            validate_content=validate_content,
-            timeout=timeout,
-            **kwargs
-        )
-        return sorted(responses, key=lambda r: r.get('last_modified'))[-1]
-    except StorageErrorException as error:
-        process_storage_error(error)
-
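_upload_file_helper first creates the file at its final size and then pushes the content as ranges; a rough hand-written equivalent using the public client methods defined below (assuming an existing file_client and an in-memory payload):

    data = b"x" * (5 * 1024 * 1024)      # illustrative payload
    file_client.create_file(size=len(data))
    range_size = 4 * 1024 * 1024         # mirrors the default max_range_size
    for start in range(0, len(data), range_size):
        chunk = data[start:start + range_size]
        file_client.upload_range(chunk, offset=start, length=len(chunk))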
-
-class ShareFileClient(StorageAccountHostsMixin):
-    """A client to interact with a specific file, although that file may not yet exist.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the
-        file, use the :func:`from_file_url` classmethod.
-    :param share_name:
-        The name of the share for the file.
-    :type share_name: str
-    :param str file_path:
-        The file path to the file with which to interact. If specified, this value will override
-        a file value specified in the file URL.
-    :param str snapshot:
-        An optional file snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`ShareClient.create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-    def __init__( # type: ignore
-            self, account_url,  # type: str
-            share_name,  # type: str
-            file_path,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not (share_name and file_path):
-            raise ValueError("Please specify a share name and file name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-        if hasattr(credential, 'get_token'):
-            raise ValueError("Token credentials not supported by the File service.")
-
-        path_snapshot = None
-        path_snapshot, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError(
-                'You need to provide either an account shared key or SAS token when creating a storage service.')
-        try:
-            self.snapshot = snapshot.snapshot # type: ignore
-        except AttributeError:
-            try:
-                self.snapshot = snapshot['snapshot'] # type: ignore
-            except TypeError:
-                self.snapshot = snapshot or path_snapshot
-
-        self.share_name = share_name
-        self.file_path = file_path.split('/')
-        self.file_name = self.file_path[-1]
-        self.directory_path = "/".join(self.file_path[:-1])
-
-        self._query_str, credential = self._format_query_string(
-            sas_token, credential, share_snapshot=self.snapshot)
-        super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    @classmethod
-    def from_file_url(
-            cls, file_url,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareFileClient
-        """A client to interact with a specific file, although that file may not yet exist.
-
-        :param str file_url: The full URI to the file.
-        :param str snapshot:
-            An optional file snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`ShareClient.create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A File client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        try:
-            if not file_url.lower().startswith('http'):
-                file_url = "https://" + file_url
-        except AttributeError:
-            raise ValueError("File URL must be a string.")
-        parsed_url = urlparse(file_url.rstrip('/'))
-
-        if not (parsed_url.netloc and parsed_url.path):
-            raise ValueError("Invalid URL: {}".format(file_url))
-        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
-
-        path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
-        path_snapshot, _ = parse_query(parsed_url.query)
-        snapshot = snapshot or path_snapshot
-        share_name = unquote(path_share)
-        file_path = '/'.join([unquote(p) for p in path_file.split('/')])
-        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
-
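A construction sketch for from_file_url; the account, share, path, and credential below are placeholders:

    file_client = ShareFileClient.from_file_url(
        "https://myaccount.file.core.windows.net/myshare/dir/file.txt",
        credential="<sas-token>")  # placeholder credential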
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        share_name = self.share_name
-        if isinstance(share_name, six.text_type):
-            share_name = share_name.encode('UTF-8')
-        return "{}://{}/{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(share_name),
-            "/".join([quote(p, safe='~') for p in self.file_path]),
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            share_name,  # type: str
-            file_path,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareFileClient
-        """Create ShareFileClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param share_name: The name of the share.
-        :type share_name: str
-        :param str file_path:
-            The file path.
-        :param str snapshot:
-            An optional file snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`ShareClient.create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A File client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_hello_world.py
-                :start-after: [START create_file_client]
-                :end-before: [END create_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Creates the file client with connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
-
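A sketch with a placeholder connection string:

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<key>;EndpointSuffix=core.windows.net")  # placeholders
    file_client = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="dir/file.txt")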
-    @distributed_trace
-    def acquire_lease(self, lease_id=None, **kwargs):
-        # type: (Optional[str], **Any) -> ShareLeaseClient
-        """Requests a new lease.
-
-        If the file does not have an active lease, the File
-        Service creates a lease on the file and returns a new lease.
-
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The File Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A ShareLeaseClient object.
-        :rtype: ~azure.storage.fileshare.ShareLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START acquire_lease_on_blob]
-                :end-before: [END acquire_lease_on_blob]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on a blob.
-        """
-        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
-        lease.acquire(**kwargs)
-        return lease
-
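A usage sketch, assuming file_client exists and that the returned ShareLeaseClient exposes release() as in the shipped SDK; the GUID is an arbitrary example:

    lease = file_client.acquire_lease(lease_id="00000000-1111-2222-3333-444444444444")
    try:
        file_client.upload_file(b"guarded write", lease=lease)
    finally:
        lease.release()  # assumed ShareLeaseClient method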
-    @distributed_trace
-    def create_file(  # type: ignore
-            self, size,  # type: int
-            file_attributes="none",  # type: Union[str, NTFSAttributes]
-            file_creation_time="now",  # type: Union[str, datetime]
-            file_last_write_time="now",  # type: Union[str, datetime]
-            file_permission=None,   # type: Optional[str]
-            permission_key=None,  # type: Optional[str]
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a new file.
-
-        Note that it only initializes the file with no content.
-
-        :param int size: Specifies the maximum size for the file,
-            up to 1 TiB.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, the default value is "None" and the attributes will be set to "Archive".
-            Here is an example for when the var type is str: 'Temporary|Archive'.
-            The file_attributes value is not case-sensitive.
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file
-            Default value: Now.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Now.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client.py
-                :start-after: [START create_file]
-                :end-before: [END create_file]
-                :language: python
-                :dedent: 12
-                :caption: Create a file.
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        content_settings = kwargs.pop('content_settings', None)
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption and not self.key_encryption_key:
-            raise ValueError("Encryption required but no key was provided.")
-
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        file_http_headers = None
-        if content_settings:
-            file_http_headers = FileHTTPHeaders(
-                file_cache_control=content_settings.cache_control,
-                file_content_type=content_settings.content_type,
-                file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                file_content_encoding=content_settings.content_encoding,
-                file_content_language=content_settings.content_language,
-                file_content_disposition=content_settings.content_disposition
-            )
-        file_permission = _get_file_permission(file_permission, permission_key, 'Inherit')
-        try:
-            return self._client.file.create(  # type: ignore
-                file_content_length=size,
-                metadata=metadata,
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                file_http_headers=file_http_headers,
-                lease_access_conditions=access_conditions,
-                headers=headers,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def upload_file(
-            self, data,  # type: Any
-            length=None,  # type: Optional[int]
-            file_attributes="none",  # type: Union[str, NTFSAttributes]
-            file_creation_time="now",  # type: Union[str, datetime]
-            file_last_write_time="now",  # type: Union[str, datetime]
-            file_permission=None,  # type: Optional[str]
-            permission_key=None,  # type: Optional[str]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Uploads a new file.
-
-        :param Any data:
-            Content of the file.
-        :param int length:
-            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, the default value is "None" and the attributes will be set to "Archive".
-            Here is an example for when the var type is str: 'Temporary|Archive'.
-            The file_attributes value is not case-sensitive.
-        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
-        :param file_creation_time: Creation time for the file
-            Default value: Now.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Now.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client.py
-                :start-after: [START upload_file]
-                :end-before: [END upload_file]
-                :language: python
-                :dedent: 12
-                :caption: Upload a file.
-        """
-        metadata = kwargs.pop('metadata', None)
-        content_settings = kwargs.pop('content_settings', None)
-        max_concurrency = kwargs.pop('max_concurrency', 1)
-        validate_content = kwargs.pop('validate_content', False)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding)
-        if length is None:
-            length = get_length(data)
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        if isinstance(data, bytes):
-            stream = BytesIO(data)
-        elif hasattr(data, 'read'):
-            stream = data
-        elif hasattr(data, '__iter__'):
-            stream = IterStreamer(data, encoding=encoding) # type: ignore
-        else:
-            raise TypeError("Unsupported data type: {}".format(type(data)))
-        return _upload_file_helper( # type: ignore
-            self,
-            stream,
-            length,
-            metadata,
-            content_settings,
-            validate_content,
-            timeout,
-            max_concurrency,
-            self._config,
-            file_attributes=file_attributes,
-            file_creation_time=file_creation_time,
-            file_last_write_time=file_last_write_time,
-            file_permission=file_permission,
-            file_permission_key=permission_key,
-            **kwargs)
-
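A minimal upload sketch, assuming file_client exists; the local path is a placeholder:

    with open("report.csv", "rb") as source:
        file_client.upload_file(source, max_concurrency=2)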
-    @distributed_trace
-    def start_copy_from_url(self, source_url, **kwargs):
-        # type: (str, Any) -> Any
-        """Initiates the copying of data from a source URL into the file
-        referenced by the client.
-
-        The status of this copy operation can be found using the `get_properties`
-        method.
-
-        :param str source_url:
-            Specifies the URL of the source file.
-        :keyword str file_permission:
-            If specified the permission (security descriptor) shall be set for the directory/file.
-            This value can be set to "source" to copy the security descriptor from the source file.
-            Otherwise if set, this value will be used to override the source value. If not set, permission value
-            is inherited from the parent directory of the target file. This setting can be
-            used if Permission size is <= 8KB, otherwise permission_key shall be used.
-            If SDDL is specified as input, it must have owner, group and dacl.
-            Note: Only one of the file_permission or permission_key should be specified.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword str permission_key:
-            Key of the permission to be set for the directory/file.
-            This value can be set to "source" to copy the security descriptor from the source file.
-            Otherwise if set, this value will be used to override the source value. If not set, permission value
-            is inherited from the parent directory of the target file.
-            Note: Only one of the file_permission or permission_key should be specified.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword file_attributes:
-            This value can be set to "source" to copy file attributes from the source file to the target file,
-            or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes
-            to set on the target file. If this is not set, the default value is "Archive".
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :keyword file_creation_time:
-            This value can be set to "source" to copy the creation time from the source file to the target file,
-            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
-            If this is not set, creation time will be set to the date time value of the creation
-            (or when it was overwritten) of the target file by copy engine.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_creation_time: str or ~datetime.datetime
-        :keyword file_last_write_time:
-            This value can be set to "source" to copy the last write time from the source file to the target file, or
-            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
-            If this is not set, value will be the last write time to the file by the copy engine.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_last_write_time: str or ~datetime.datetime
-        :keyword bool ignore_read_only:
-            Specifies the option to overwrite the target file if it already exists and has read-only attribute set.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword bool set_archive_attribute:
-            Specifies the option to set the archive attribute on the target file.
-            True means the archive attribute will be set on the target file despite attribute
-            overrides or the source file state.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client.py
-                :start-after: [START copy_file_from_url]
-                :end-before: [END copy_file_from_url]
-                :language: python
-                :dedent: 12
-                :caption: Copy a file from a URL
-        """
-        metadata = kwargs.pop('metadata', None)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        kwargs.update(get_smb_properties(kwargs))
-        try:
-            return self._client.file.start_copy(
-                source_url,
-                metadata=metadata,
-                lease_access_conditions=access_conditions,
-                headers=headers,
-                cls=return_response_headers,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
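The copy runs asynchronously on the service side, so callers typically poll; a hedged sketch assuming a destination file_client, a source_url variable, and the copy.status attribute on FileProperties as exposed by the shipped SDK:

    import time

    file_client.start_copy_from_url(source_url)  # source_url is a placeholder
    props = file_client.get_file_properties()
    while props.copy.status == 'pending':  # assumed CopyProperties attribute
        time.sleep(2)
        props = file_client.get_file_properties()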
-    def abort_copy(self, copy_id, **kwargs):
-        # type: (Union[str, FileProperties], Any) -> None
-        """Abort an ongoing copy operation.
-
-        This will leave a destination file with zero length and full metadata.
-        This will raise an error if the copy operation has already ended.
-
-        :param copy_id:
-            The copy operation to abort. This can be either an ID, or an
-            instance of FileProperties.
-        :type copy_id: str or ~azure.storage.fileshare.FileProperties
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            copy_id = copy_id.copy.id
-        except AttributeError:
-            try:
-                copy_id = copy_id['copy_id']
-            except TypeError:
-                pass
-        try:
-            self._client.file.abort_copy(copy_id=copy_id,
-                                         lease_access_conditions=access_conditions,
-                                         timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def download_file(
-            self, offset=None,  # type: Optional[int]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> StorageStreamDownloader
-        """Downloads a file with automatic chunking, returning a streaming downloader.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the file.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file. Also note that if enabled, the memory-efficient algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable data generator (stream).
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client.py
-                :start-after: [START download_file]
-                :end-before: [END download_file]
-                :language: python
-                :dedent: 12
-                :caption: Download a file.
-        """
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-        if length is not None and offset is None:
-            raise ValueError("Offset value must not be None if length is set.")
-
-        range_end = None
-        if length is not None:
-            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        return StorageStreamDownloader(
-            client=self._client.file,
-            config=self._config,
-            start_range=offset,
-            end_range=range_end,
-            encryption_options=None,
-            name=self.file_name,
-            path='/'.join(self.file_path),
-            share=self.share_name,
-            lease_access_conditions=access_conditions,
-            cls=deserialize_file_stream,
-            **kwargs)
-
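A ranged-download sketch, assuming file_client exists; offset/length select a byte window and, as noted above, the service treats the range end as inclusive:

    partial = file_client.download_file(offset=0, length=1024).readall()
    assert len(partial) <= 1024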
-    @distributed_trace
-    def delete_file(self, **kwargs):
-        # type: (Any) -> None
-        """Marks the specified file for deletion. The file is
-        later deleted during garbage collection.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client.py
-                :start-after: [START delete_file]
-                :end-before: [END delete_file]
-                :language: python
-                :dedent: 12
-                :caption: Delete a file.
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_file_properties(self, **kwargs):
-        # type: (Any) -> FileProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: FileProperties
-        :rtype: ~azure.storage.fileshare.FileProperties
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            file_props = self._client.file.get_properties(
-                sharesnapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                cls=deserialize_file_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        file_props.name = self.file_name
-        file_props.share = self.share_name
-        file_props.snapshot = self.snapshot
-        file_props.path = '/'.join(self.file_path)
-        return file_props # type: ignore
-
-    @distributed_trace
-    def set_http_headers(self, content_settings,  # type: ContentSettings
-                         file_attributes="preserve",  # type: Union[str, NTFSAttributes]
-                         file_creation_time="preserve",  # type: Union[str, datetime]
-                         file_last_write_time="preserve",  # type: Union[str, datetime]
-                         file_permission=None,  # type: Optional[str]
-                         permission_key=None,  # type: Optional[str]
-                         **kwargs  # type: Any
-                         ):
-        # type: (...) -> Dict[str, Any]
-        """Sets HTTP headers on the file.
-
-        :param ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, indicates preservation of existing values.
-            Here is an example for when the var type is str: 'Temporary|Archive'
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file
-            Default value: Preserve.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Preserve.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        file_content_length = kwargs.pop('size', None)
-        file_http_headers = FileHTTPHeaders(
-            file_cache_control=content_settings.cache_control,
-            file_content_type=content_settings.content_type,
-            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-            file_content_encoding=content_settings.content_encoding,
-            file_content_language=content_settings.content_language,
-            file_content_disposition=content_settings.content_disposition
-        )
-        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
-        try:
-            return self._client.file.set_http_headers(  # type: ignore
-                file_content_length=file_content_length,
-                file_http_headers=file_http_headers,
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
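For orientation, a minimal sketch of driving this set_http_headers surface. It assumes the vendored client mirrors the public azure-storage-file-share API; the connection string, share name, and file path below are placeholders:

    from azure.storage.fileshare import ShareFileClient, ContentSettings

    file_client = ShareFileClient.from_connection_string(
        "<connection-string>", share_name="myshare", file_path="mydir/myfile")
    # Attributes, timestamps, and permission default to "preserve", as above.
    result = file_client.set_http_headers(
        ContentSettings(content_type="text/plain", cache_control="no-cache"))
    print(result["etag"], result["last_modified"])
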
-    @distributed_trace
-    def set_file_metadata(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
-        """Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        Each call to this operation replaces all existing metadata
-        attached to the file. To remove all metadata from the file,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return self._client.file.set_metadata( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                metadata=metadata,
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
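A matching sketch for set_file_metadata; as documented above, each call replaces the entire metadata set, so a call with no dict clears it (file_client as in the previous sketch):

    file_client.set_file_metadata({"category": "reports", "owner": "alice"})
    file_client.set_file_metadata()  # no dict given: removes all metadata
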
-    @distributed_trace
-    def upload_range(  # type: ignore
-            self, data,  # type: bytes
-            offset,  # type: int
-            length,  # type: int
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Upload a range of bytes to a file.
-
-        :param bytes data:
-            The data to upload.
-        :param int offset:
-            Start of byte range to use for uploading a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for uploading a section of the file.
-            The range can be up to 4 MB in size.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the file content. The storage
-            service checks the hash of the content that has arrived
-            against the hash that was sent. This is primarily valuable for detecting
-            bit flips on the wire when using http instead of https, as https (the
-            default) already validates the content. Note that this MD5 hash is not
-            stored with the file.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        validate_content = kwargs.pop('validate_content', False)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding)
-
-        end_range = offset + length - 1  # Reformat to an inclusive range index
-        content_range = 'bytes={0}-{1}'.format(offset, end_range)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        try:
-            return self._client.file.upload_range( # type: ignore
-                range=content_range,
-                content_length=length,
-                optionalbody=data,
-                timeout=timeout,
-                validate_content=validate_content,
-                lease_access_conditions=access_conditions,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
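A sketch of upload_range under the same assumptions; the file must already exist at the required size, here allocated with create_file from the same client surface:

    data = b"\x00" * 1024
    file_client.create_file(size=len(data))  # allocate space first
    file_client.upload_range(data, offset=0, length=len(data))
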
-    @staticmethod
-    def _upload_range_from_url_options(source_url,  # type: str
-                                       offset,  # type: int
-                                       length,  # type: int
-                                       source_offset,  # type: int
-                                       **kwargs  # type: Any
-                                       ):
-        # type: (...) -> Dict[str, Any]
-
-        if offset is None:
-            raise ValueError("offset must be provided.")
-        if length is None:
-            raise ValueError("length must be provided.")
-        if source_offset is None:
-            raise ValueError("source_offset must be provided.")
-
-        # Format range
-        end_range = offset + length - 1
-        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
-        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)
-
-        source_mod_conditions = get_source_conditions(kwargs)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        options = {
-            'copy_source': source_url,
-            'content_length': 0,
-            'source_range': source_range,
-            'range': destination_range,
-            'source_modified_access_conditions': source_mod_conditions,
-            'lease_access_conditions': access_conditions,
-            'timeout': kwargs.pop('timeout', None),
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def upload_range_from_url(self, source_url,
-                              offset,
-                              length,
-                              source_offset,
-                              **kwargs
-                              ):
-        # type: (str, int, int, int, **Any) -> Dict[str, Any]
-        """
-        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
-
-        :param str source_url:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.file.core.windows.net/myshare/mydir/myfile
-            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
-        :param int offset:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-        :param int source_offset:
-            Start of the range of bytes (inclusive) to be taken from the copy source.
-            The service will read the same number of bytes as the destination range (length bytes).
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the range only if the source
-            resource has been modified since the specified date/time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this conditional header to copy the range only if the source
-            resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        options = self._upload_range_from_url_options(
-            source_url=source_url,
-            offset=offset,
-            length=length,
-            source_offset=source_offset,
-            **kwargs
-        )
-        try:
-            return self._client.file.upload_range_from_url(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
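A sketch of upload_range_from_url; the source URL is a placeholder and must be public or carry a SAS, per the docstring above:

    src = "https://otheraccount.file.core.windows.net/myshare/mydir/srcfile?<sas>"
    file_client.upload_range_from_url(src, offset=0, length=512, source_offset=0)
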
-    @distributed_trace
-    def get_ranges(  # type: ignore
-            self, offset=None,  # type: Optional[int]
-            length=None,  # type: Optional[int]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> List[Dict[str, int]]
-        """Returns the list of valid ranges of a file.
-
-        :param int offset:
-            Specifies the start offset of bytes over which to get ranges.
-        :param int length:
-            Number of bytes over which to get ranges.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A list of valid ranges.
-        :rtype: List[dict[str, int]]
-        """
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Unsupported method for encryption.")
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        content_range = None
-        if offset is not None:
-            if length is not None:
-                end_range = offset + length - 1  # Reformat to an inclusive range index
-                content_range = 'bytes={0}-{1}'.format(offset, end_range)
-            else:
-                content_range = 'bytes={0}-'.format(offset)
-        try:
-            ranges = self._client.file.get_range_list(
-                range=content_range,
-                sharesnapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return [{'start': b.start, 'end': b.end} for b in ranges]
-
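A sketch of get_ranges; with no arguments it covers the whole file, and each returned dict holds inclusive byte indices, as built in the return statement above:

    for r in file_client.get_ranges():
        print(r["start"], r["end"])
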
-    @distributed_trace
-    def clear_range( # type: ignore
-            self, offset,  # type: int
-            length,  # type: int
-            **kwargs
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Clears the specified range and releases the space used in storage for
-        that range.
-
-        :param int offset:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Unsupported method for encryption.")
-
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer aligned to a 512-byte boundary")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer aligned to a 512-byte boundary")
-        end_range = length + offset - 1  # Reformat to an inclusive range index
-        content_range = 'bytes={0}-{1}'.format(offset, end_range)
-        try:
-            return self._client.file.upload_range( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                content_length=0,
-                file_range_write="clear",
-                range=content_range,
-                lease_access_conditions=access_conditions,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
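A sketch of clear_range; both arguments must be 512-byte aligned, matching the validation above:

    file_client.clear_range(offset=0, length=512)
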
-    @distributed_trace
-    def resize_file(self, size, **kwargs):
-        # type: (int, Any) -> Dict[str, Any]
-        """Resizes a file to the specified size.
-
-        :param int size:
-            Size to resize file to (in bytes)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return self._client.file.set_http_headers( # type: ignore
-                file_content_length=size,
-                file_attributes="preserve",
-                file_creation_time="preserve",
-                file_last_write_time="preserve",
-                file_permission="preserve",
-                lease_access_conditions=access_conditions,
-                cls=return_response_headers,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
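A sketch of resize_file; note from the implementation above that it simply sets file_content_length while preserving attributes, timestamps, and permission:

    file_client.resize_file(2048)  # truncates or extends to exactly 2048 bytes
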
-    @distributed_trace
-    def list_handles(self, **kwargs):
-        # type: (Any) -> ItemPaged[Handle]
-        """Lists handles for file.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of HandleItem
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem]
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.file.list_handles,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, results_per_page=results_per_page,
-            page_iterator_class=HandlesPaged)
-
-    @distributed_trace
-    def close_handle(self, handle, **kwargs):
-        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
-        """Close an open file handle.
-
-        :param handle:
-            A specific handle to close.
-        :type handle: str or ~azure.storage.fileshare.HandleItem
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            The number of handles closed (this may be 0 if the specified handle was not found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        try:
-            handle_id = handle.id # type: ignore
-        except AttributeError:
-            handle_id = handle
-        if handle_id == '*':
-            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
-        try:
-            response = self._client.file.force_close_handles(
-                handle_id,
-                marker=None,
-                sharesnapshot=self.snapshot,
-                cls=return_response_headers,
-                **kwargs
-            )
-            return {
-                'closed_handles_count': response.get('number_of_handles_closed', 0),
-                'failed_handles_count': response.get('number_of_handles_failed', 0)
-            }
-        except StorageErrorException as error:
-            process_storage_error(error)
-
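A combined sketch of list_handles and close_handle; close_handle accepts either the handle item or its ID string, per the AttributeError fallback above:

    for handle in file_client.list_handles():
        counts = file_client.close_handle(handle)
        print(counts["closed_handles_count"], counts["failed_handles_count"])
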
-    @distributed_trace
-    def close_all_handles(self, **kwargs):
-        # type: (Any) -> Dict[str, int]
-        """Close any open file handles.
-
-        This operation will block until the service has closed all open handles.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The total number of handles closed (this may be 0 if no open handles
-            were found) and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        timeout = kwargs.pop('timeout', None)
-        start_time = time.time()
-
-        try_close = True
-        continuation_token = None
-        total_closed = 0
-        total_failed = 0
-        while try_close:
-            try:
-                response = self._client.file.force_close_handles(
-                    handle_id='*',
-                    timeout=timeout,
-                    marker=continuation_token,
-                    sharesnapshot=self.snapshot,
-                    cls=return_response_headers,
-                    **kwargs
-                )
-            except StorageErrorException as error:
-                process_storage_error(error)
-            continuation_token = response.get('marker')
-            try_close = bool(continuation_token)
-            total_closed += response.get('number_of_handles_closed', 0)
-            total_failed += response.get('number_of_handles_failed', 0)
-            if timeout:
-                timeout = max(0, timeout - (time.time() - start_time))
-        return {
-            'closed_handles_count': total_closed,
-            'failed_handles_count': total_failed
-        }
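A sketch of close_all_handles; the loop above follows the continuation marker internally, so a single call suffices:

    totals = file_client.close_all_handles(timeout=30)
    print(totals["closed_handles_count"], totals["failed_handles_count"])
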
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_file_storage import AzureFileStorage
-__all__ = ['AzureFileStorage']
-
-from .version import VERSION
-
-__version__ = VERSION
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_azure_file_storage.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_azure_file_storage.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_azure_file_storage.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_azure_file_storage.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,71 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import PipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration import AzureFileStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations import ServiceOperations
-from .operations import ShareOperations
-from .operations import DirectoryOperations
-from .operations import FileOperations
-from . import models
-
-
-class AzureFileStorage(object):
-    """AzureFileStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.fileshare.operations.ServiceOperations
-    :ivar share: Share operations
-    :vartype share: azure.storage.fileshare.operations.ShareOperations
-    :ivar directory: Directory operations
-    :vartype directory: azure.storage.fileshare.operations.DirectoryOperations
-    :ivar file: File operations
-    :vartype file: azure.storage.fileshare.operations.FileOperations
-
-    :param version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    :param url: The URL of the service account, share, directory or file that
-     is the target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(self, version, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureFileStorageConfiguration(version, url, **kwargs)
-        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2019-07-07'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.share = ShareOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.directory = DirectoryOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.file = FileOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    def close(self):
-        self._client.close()
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-    def __exit__(self, *exc_details):
-        self._client.__exit__(*exc_details)
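The generated client is normally constructed for you by the higher-level share/file clients, but it could be driven standalone while this module still shipped; a hedged sketch (the account URL is a placeholder):

    from azure.multiapi.storagev2.fileshare.v2019_07_07._generated import (
        AzureFileStorage)

    with AzureFileStorage(version="2019-07-07",
                          url="https://myaccount.file.core.windows.net") as client:
        pass  # operation groups: client.service, .share, .directory, .file
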
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_configuration.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_configuration.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_configuration.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/_configuration.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,58 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from .version import VERSION
-
-
-class AzureFileStorageConfiguration(Configuration):
-    """Configuration for AzureFileStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    :param url: The URL of the service account, share, directory or file that
-     is the target of the desired operation.
-    :type url: str
-    :ivar file_range_write_from_url: Only update is supported: - Update:
-     Writes the bytes downloaded from the source url into the specified range.
-    :type file_range_write_from_url: str
-    """
-
-    def __init__(self, version, url, **kwargs):
-
-        if version is None:
-            raise ValueError("Parameter 'version' must not be None.")
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureFileStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-
-        self.version = version
-        self.url = url
-        self.file_range_write_from_url = "update"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
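Because _configure only falls back to a default when a policy is absent from kwargs, any pipeline policy can be swapped at construction time; a sketch, assuming only azure-core:

    from azure.core.pipeline import policies

    config = AzureFileStorageConfiguration(
        version="2019-07-07",
        url="https://myaccount.file.core.windows.net",
        retry_policy=policies.RetryPolicy(retry_total=5))
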
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_file_storage_async import AzureFileStorage
-__all__ = ['AzureFileStorage']
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_azure_file_storage_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_azure_file_storage_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_azure_file_storage_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_azure_file_storage_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,72 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import AsyncPipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration_async import AzureFileStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations_async import ServiceOperations
-from .operations_async import ShareOperations
-from .operations_async import DirectoryOperations
-from .operations_async import FileOperations
-from .. import models
-
-
-class AzureFileStorage(object):
-    """AzureFileStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations
-    :ivar share: Share operations
-    :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations
-    :ivar directory: Directory operations
-    :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations
-    :ivar file: File operations
-    :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations
-
-    :param version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    :param url: The URL of the service account, share, directory or file that
-     is the target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(
-            self, version, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureFileStorageConfiguration(version, url, **kwargs)
-        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2019-07-07'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.share = ShareOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.directory = DirectoryOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.file = FileOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    async def close(self):
-        await self._client.close()
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-    async def __aexit__(self, *exc_details):
-        await self._client.__aexit__(*exc_details)
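The async variant mirrors the sync client but is an async context manager; a sketch under the same assumptions:

    import asyncio
    from azure.multiapi.storagev2.fileshare.v2019_07_07._generated.aio import (
        AzureFileStorage)

    async def main():
        async with AzureFileStorage(
                version="2019-07-07",
                url="https://myaccount.file.core.windows.net") as client:
            pass  # same four operation groups, now awaitable

    asyncio.run(main())
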
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_configuration_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_configuration_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_configuration_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/_configuration_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,59 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from ..version import VERSION
-
-
-class AzureFileStorageConfiguration(Configuration):
-    """Configuration for AzureFileStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param version: Specifies the version of the operation to use for this
-     request.
-    :type version: str
-    :param url: The URL of the service account, share, directory or file that
-     is the target of the desired operation.
-    :type url: str
-    :ivar file_range_write_from_url: Only update is supported: - Update:
-     Writes the bytes downloaded from the source url into the specified range.
-    :type file_range_write_from_url: str
-    """
-
-    def __init__(self, version, url, **kwargs):
-
-        if version is None:
-            raise ValueError("Parameter 'version' must not be None.")
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureFileStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-        self.accept_language = None
-
-        self.version = version
-        self.url = url
-        self.file_range_write_from_url = "update"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations_async import ServiceOperations
-from ._share_operations_async import ShareOperations
-from ._directory_operations_async import DirectoryOperations
-from ._file_operations_async import FileOperations
-
-__all__ = [
-    'ServiceOperations',
-    'ShareOperations',
-    'DirectoryOperations',
-    'FileOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_directory_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,672 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class DirectoryOperations:
-    """DirectoryOperations async operations.
-
-    You should not instantiate this class directly; instead, create a client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "directory".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "directory"
-
-    async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs):
-        """Creates a new directory under the specified share or parent directory.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if Permission size is <= 8KB, else x-ms-file-permission-key
-         header shall be used. Default value: Inherit. If SDDL is specified as
-         input, it must have owner, group and dacl. Note: Only one of the
-         x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}/{directory}'}
-
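Since the raw operation returns None unless a cls callback is supplied, callers that need the response headers pass one through; a hedged sketch (create_dir is a hypothetical helper, and the client URL must already target the directory):

    async def create_dir(client):
        # cls receives (response, deserialized_body, response_headers)
        headers = await client.directory.create(
            metadata=None, cls=lambda resp, body, hdrs: hdrs)
        print(headers["ETag"], headers["Last-Modified"])
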
-    async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs):
-        """Returns all system properties for the specified directory, and can also
-        be used to check the existence of a directory. The data returned does
-        not include the files in the directory or any subdirectories.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def delete(self, timeout=None, *, cls=None, **kwargs):
-        """Removes the specified empty directory. Note that the directory must be
-        empty before it can be deleted.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs):
-        """Sets properties on the directory.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if Permission size is <= 8KB, else x-ms-file-permission-key
-         header shall be used. Default value: Inherit. If SDDL is specified as
-         input, it must have owner, group and dacl. Note: Only one of the
-         x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs):
-        """Updates user defined metadata for the specified directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs):
-        """Returns a list of files or directories under the specified share or
-        directory. It lists the contents only for a single level of the
-        directory hierarchy.
-
-        :param prefix: Filters the results to return only entries whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListFilesAndDirectoriesSegmentResponse or the result of
-         cls(response)
-        :rtype:
-         ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_files_and_directories_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs):
-        """Lists handles for directory.
-
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param recursive: Specifies whether the operation should apply to the
-         directory specified in the URI, its files, its subdirectories, and
-         their files.
-        :type recursive: bool
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListHandlesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "listhandles"
-
-        # Construct URL
-        url = self.list_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if recursive is not None:
-            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListHandlesResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_handles.metadata = {'url': '/{shareName}/{directory}'}
-
-    async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs):
-        """Closes all handles open for given directory.
-
-        :param handle_id: Specifies the handle ID opened on the file or
-         directory to be closed. An asterisk (‘*’) is a wildcard that
-         specifies all handles.
-        :type handle_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param recursive: Specifies whether the operation should apply to the
-         directory specified in the URI, its files, its subdirectories, and
-         their files.
-        :type recursive: bool
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "forceclosehandles"
-
-        # Construct URL
-        url = self.force_close_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
-        if recursive is not None:
-            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
-                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
-                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    force_close_handles.metadata = {'url': '/{shareName}/{directory}'}
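
Note: every generated operation above accepts an optional cls callback that
is invoked as cls(response, deserialized, response_headers), and the
callback's return value becomes the call's result. A minimal sketch of using
that hook to surface the parsed response headers, assuming an
already-constructed instance of the DirectoryOperations class above
(directory_ops is a hypothetical name):

    def return_headers(raw_response, deserialized, response_headers):
        # The generated method passes the raw HTTP response, the deserialized
        # body (None for set_metadata), and the parsed headers dict.
        return response_headers

    async def show_etag(directory_ops):
        # set_metadata normally returns None; with cls= the headers dict is
        # returned instead, e.g. to read the resulting ETag.
        headers = await directory_ops.set_metadata(cls=return_headers)
        print(headers.get('ETag'))
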
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_file_operations_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_file_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_file_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_file_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1666 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class FileOperations:
-    """FileOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file".
-    :ivar x_ms_copy_action: Constant value: "abort".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_type = "file"
-        self.x_ms_copy_action = "abort"
-
-    async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Creates a new file or replaces a file. Note it only initializes the
-        file with no content.
-
-        :param file_content_length: Specifies the maximum size for the file,
-         up to 1 TB.
-        :type file_content_length: long
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for a file and ‘Directory’
-         for a directory. ‘None’ can also be specified as the default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8 KB; otherwise the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have an owner,
-         group, and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param file_http_headers: Additional parameters for the operation
-        :type file_http_headers:
-         ~azure.storage.fileshare.models.FileHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_content_type = None
-        if file_http_headers is not None:
-            file_content_type = file_http_headers.file_content_type
-        file_content_encoding = None
-        if file_http_headers is not None:
-            file_content_encoding = file_http_headers.file_content_encoding
-        file_content_language = None
-        if file_http_headers is not None:
-            file_content_language = file_http_headers.file_content_language
-        file_cache_control = None
-        if file_http_headers is not None:
-            file_cache_control = file_http_headers.file_cache_control
-        file_content_md5 = None
-        if file_http_headers is not None:
-            file_content_md5 = file_http_headers.file_content_md5
-        file_content_disposition = None
-        if file_http_headers is not None:
-            file_content_disposition = file_http_headers.file_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long')
-        header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if file_content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str')
-        if file_content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str')
-        if file_content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str')
-        if file_cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str')
-        if file_content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray')
-        if file_content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Reads or downloads a file from the system, including its metadata and
-        properties.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param range: Return file data only from the specified byte range.
-        :type range: str
-        :param range_get_content_md5: When this header is set to true and
-         specified together with the Range header, the service returns the MD5
-         hash for the range, as long as the range is less than or equal to 4 MB
-         in size.
-        :type range_get_content_md5: bool
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.download.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            await response.load_body()
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    download.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Returns all user-defined metadata, standard HTTP properties, and system
-        properties for the file. It does not return the content of the file.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def delete(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """removes the file from the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Sets HTTP headers on the file.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for a file and ‘Directory’
-         for a directory. ‘None’ can also be specified as the default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param file_content_length: Resizes a file to the specified size. If
-         the specified byte value is less than the current size of the file,
-         then all ranges above the specified byte value are cleared.
-        :type file_content_length: long
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8 KB; otherwise the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have an owner,
-         group, and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param file_http_headers: Additional parameters for the operation
-        :type file_http_headers:
-         ~azure.storage.fileshare.models.FileHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_content_type = None
-        if file_http_headers is not None:
-            file_content_type = file_http_headers.file_content_type
-        file_content_encoding = None
-        if file_http_headers is not None:
-            file_content_encoding = file_http_headers.file_content_encoding
-        file_content_language = None
-        if file_http_headers is not None:
-            file_content_language = file_http_headers.file_content_language
-        file_cache_control = None
-        if file_http_headers is not None:
-            file_cache_control = file_http_headers.file_cache_control
-        file_content_md5 = None
-        if file_http_headers is not None:
-            file_content_md5 = file_http_headers.file_content_md5
-        file_content_disposition = None
-        if file_http_headers is not None:
-            file_content_disposition = file_http_headers.file_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_http_headers.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_content_length is not None:
-            header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if file_content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str')
-        if file_content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str')
-        if file_content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str')
-        if file_cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str')
-        if file_content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray')
-        if file_content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
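
The unpacking at the top of set_http_headers fans a single file_http_headers model out into individual x-ms-* request headers, one per non-None attribute. A minimal sketch of driving it, assuming file_client is an already-configured instance of this generated async operations class and using a stand-in object with the same attribute names (both assumptions, not from the source):

    from types import SimpleNamespace

    async def demo_set_http_headers(file_client):
        # Stand-in for the FileHttpHeaders model: the generated code only reads
        # these six attributes; each non-None one becomes one request header.
        http_headers = SimpleNamespace(
            file_content_type="text/plain",    # -> x-ms-content-type
            file_content_encoding=None,
            file_content_language=None,
            file_cache_control="no-cache",     # -> x-ms-cache-control
            file_content_md5=None,
            file_content_disposition=None,
        )
        # The SMB property headers are mandatory here; "preserve" asks the
        # service to keep the existing values.
        await file_client.set_http_headers(
            file_attributes="preserve",
            file_creation_time="preserve",
            file_last_write_time="preserve",
            file_http_headers=http_headers,
        )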
-
-    async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Updates user-defined metadata for the specified file.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
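
All of these operations return None unless a cls callback is supplied; on success the callback is invoked as cls(response, deserialized_body, response_headers), which is the generated escape hatch for reading response headers such as ETag. A hedged sketch (file_client is an assumed, pre-built client instance):

    async def demo_set_metadata(file_client):
        # Returning the third argument exposes the parsed response headers.
        def return_headers(response, deserialized, headers):
            return headers

        headers = await file_client.set_metadata(
            metadata="category=archive",  # serialized onto the x-ms-meta header
            cls=return_headers,
        )
        print(headers["ETag"], headers["x-ms-request-id"])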
-
-    async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The File service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
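
A sketch of the acquire/release round trip, using the cls hook to capture the server-assigned lease id; file_client is assumed, and release_lease is the paired call defined just below:

    import uuid

    async def demo_lease_roundtrip(file_client):
        get_headers = lambda resp, body, headers: headers

        # duration must be 15-60 seconds, or -1 for a lease that never expires.
        acquired = await file_client.acquire_lease(
            duration=-1,
            proposed_lease_id=str(uuid.uuid4()),  # must be a valid GUID string
            cls=get_headers,
        )
        lease_id = acquired["x-ms-lease-id"]

        # Releasing requires presenting the current lease id.
        await file_client.release_lease(lease_id)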
-
-    async def release_lease(self, lease_id, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The File service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
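
change_lease swaps the active lease id in place: the caller presents the current id and proposes a new GUID, and the response carries the id now in force. A short sketch under the same file_client assumption:

    import uuid

    async def demo_rotate_lease(file_client, current_lease_id):
        new_id = str(uuid.uuid4())
        result = await file_client.change_lease(
            current_lease_id,
            proposed_lease_id=new_id,
            cls=lambda resp, body, headers: headers,
        )
        return result["x-ms-lease-id"]  # the rotated id on success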
-
-    async def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "lease"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
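
Unlike release, break does not require knowledge of the lease id (lease_access_conditions is optional), and the service answers 202. A one-line sketch, file_client assumed:

    async def demo_break_lease(file_client):
        # Forcibly ends the current lease without presenting its id.
        await file_client.break_lease()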
-
-    async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Upload a range of bytes to a file.
-
-        :param range: Specifies the range of bytes to be written. Both the
-         start and end of the range must be specified. For an update operation,
-         the range can be up to 4 MB in size. For a clear operation, the range
-         can be up to the value of the file's full size. The File service
-         accepts only a single byte range for the Range and 'x-ms-range'
-         headers, and the byte range must be specified in the following format:
-         bytes=startByte-endByte.
-        :type range: str
-        :param file_range_write: Specify one of the following options: -
-         Update: Writes the bytes specified by the request body into the
-         specified range. The Range and Content-Length headers must match to
-         perform the update. - Clear: Clears the specified range and releases
-         the space used in storage for that range. To clear a range, set the
-         Content-Length header to zero, and set the Range header to a value
-         that indicates the range to clear, up to maximum file size. Possible
-         values include: 'update', 'clear'
-        :type file_range_write: str or
-         ~azure.storage.fileshare.models.FileRangeWriteType
-        :param content_length: Specifies the number of bytes being transmitted
-         in the request body. When the x-ms-write header is set to clear, the
-         value of this header must be set to zero.
-        :type content_length: long
-        :param optionalbody: The data to stream as the request body (used
-         for update operations).
-        :type optionalbody: Generator
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param content_md5: An MD5 hash of the content. This hash is used to
-         verify the integrity of the data during transport. When the
-         Content-MD5 header is specified, the File service compares the hash of
-         the content that has arrived with the header value that was sent. If
-         the two hashes do not match, the operation will fail with error code
-         400 (Bad Request).
-        :type content_md5: bytearray
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "range"
-
-        # Construct URL
-        url = self.upload_range.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct body: the payload is streamed as-is via stream_content below
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
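
Both the update and clear modes go through the same x-ms-range header, whose inclusive bytes=start-end value must agree with Content-Length for an update and accompany a Content-Length of zero for a clear. A sketch, file_client assumed:

    async def demo_upload_range(file_client):
        data = b"hello, share"

        # Update: the range is inclusive, hence len(data) - 1.
        await file_client.upload_range(
            range="bytes=0-%d" % (len(data) - 1),
            content_length=len(data),
            optionalbody=iter([data]),  # streamed as-is via stream_content
        )

        # Clear: x-ms-write=clear requires Content-Length 0 and no body.
        await file_client.upload_range(
            range="bytes=0-%d" % (len(data) - 1),
            content_length=0,
            file_range_write="clear",
        )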
-
-    async def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Upload a range of bytes to a file where the contents are read from a
-        URL.
-
-        :param range: Writes data to the specified byte range in the file.
-        :type range: str
-        :param copy_source: Specifies the URL of the source file or blob, up
-         to 2 KB in length. To copy a file to another file within the same
-         storage account, you may use Shared Key to authenticate the source
-         file. If you are copying a file from another storage account, or if
-         you are copying a blob from the same storage account or another
-         storage account, then you must authenticate the source file or blob
-         using a shared access signature. If the source is a public blob, no
-         authentication is required to perform the copy operation. A file in a
-         share snapshot can also be specified as a copy source.
-        :type copy_source: str
-        :param content_length: Specifies the number of bytes being transmitted
-         in the request body. When the x-ms-write header is set to clear, the
-         value of this header must be set to zero.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_crc64: Specify the crc64 calculated for the
-         range of bytes that must be read from the copy source.
-        :type source_content_crc64: bytearray
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.fileshare.models.SourceModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_match_crc64 = None
-        if source_modified_access_conditions is not None:
-            source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64
-        source_if_none_match_crc64 = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "range"
-
-        # Construct URL
-        url = self.upload_range_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if source_content_crc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if source_if_match_crc64 is not None:
-            header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray')
-        if source_if_none_match_crc64 is not None:
-            header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
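
Here the payload comes from a pre-authorized source URL rather than the request body, so Content-Length is 0 and the destination and source ranges must describe spans of equal length. A sketch, with file_client and source_url both assumed:

    async def demo_upload_range_from_url(file_client, source_url):
        length = 1024 * 1024  # copy 1 MiB
        span = "bytes=0-%d" % (length - 1)
        await file_client.upload_range_from_url(
            range=span,
            copy_source=source_url,  # e.g. a SAS-authenticated file or blob URL
            content_length=0,        # no request body; data is read server-side
            source_range=span,
        )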
-
-    async def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Returns the list of valid ranges for a file.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param range: Specifies the range of bytes over which to list ranges,
-         inclusively.
-        :type range: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.fileshare.models.Range]
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "rangelist"
-
-        # Construct URL
-        url = self.get_range_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[Range]', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
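
get_range_list is one of the few operations here with a deserialized body: a plain list of Range models. A sketch, assuming file_client and the generated Range model's start/end attribute names:

    async def demo_ranges(file_client):
        ranges = await file_client.get_range_list()
        for r in ranges:
            # start/end are inclusive byte offsets of a valid range.
            print(r.start, r.end)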
-
-    async def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Copies a blob or file to a destination file within the storage account.
-
-        :param copy_source: Specifies the URL of the source file or blob, up
-         to 2 KB in length. To copy a file to another file within the same
-         storage account, you may use Shared Key to authenticate the source
-         file. If you are copying a file from another storage account, or if
-         you are copying a blob from the same storage account or another
-         storage account, then you must authenticate the source file or blob
-         using a shared access signature. If the source is a public blob, no
-         authentication is required to perform the copy operation. A file in a
-         share snapshot can also be specified as a copy source.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) is set for the directory/file. This header can be used
-         if the permission size is <= 8 KB; otherwise the
-         x-ms-file-permission-key header must be used. Default value:
-         inherit. If SDDL is specified as input, it must have owner, group,
-         and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param copy_file_smb_info: Additional parameters for the operation
-        :type copy_file_smb_info:
-         ~azure.storage.fileshare.models.CopyFileSmbInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_permission_copy_mode = None
-        if copy_file_smb_info is not None:
-            file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode
-        ignore_read_only = None
-        if copy_file_smb_info is not None:
-            ignore_read_only = copy_file_smb_info.ignore_read_only
-        file_attributes = None
-        if copy_file_smb_info is not None:
-            file_attributes = copy_file_smb_info.file_attributes
-        file_creation_time = None
-        if copy_file_smb_info is not None:
-            file_creation_time = copy_file_smb_info.file_creation_time
-        file_last_write_time = None
-        if copy_file_smb_info is not None:
-            file_last_write_time = copy_file_smb_info.file_last_write_time
-        set_archive_attribute = None
-        if copy_file_smb_info is not None:
-            set_archive_attribute = copy_file_smb_info.set_archive_attribute
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.start_copy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        if file_permission_copy_mode is not None:
-            header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType')
-        if ignore_read_only is not None:
-            header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool')
-        if file_attributes is not None:
-            header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        if file_creation_time is not None:
-            header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        if file_last_write_time is not None:
-            header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if set_archive_attribute is not None:
-            header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", set_archive_attribute, 'bool')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
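
start_copy only schedules the copy: the service answers 202 and reports x-ms-copy-id and x-ms-copy-status headers while the transfer proceeds in the background. A sketch that captures the copy id via cls (file_client and source_url assumed; polling the copy status via the file's properties is omitted):

    async def demo_copy(file_client, source_url):
        headers = await file_client.start_copy(
            copy_source=source_url,
            cls=lambda resp, body, h: h,
        )
        # A real caller would poll until x-ms-copy-status leaves "pending".
        return headers["x-ms-copy-id"]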
-
-    async def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs):
-        """Aborts a pending Copy File operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param copy_id: The copy identifier provided in the x-ms-copy-id
-         header of the original Copy File operation.
-        :type copy_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "copy"
-
-        # Construct URL
-        url = self.abort_copy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
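
The copy id captured above is exactly what abort_copy takes; aborting a still-pending copy leaves a zero-length destination file, and the service answers 204. A sketch, file_client assumed:

    async def demo_abort_copy(file_client, copy_id):
        # copy_id is the x-ms-copy-id header returned by start_copy.
        await file_client.abort_copy(copy_id)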
-
-    async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs):
-        """Lists handles for file.
-
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListHandlesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "listhandles"
-
-        # Construct URL
-        url = self.list_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListHandlesResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs):
-        """Closes all handles open for given file.
-
-        :param handle_id: Specifies the handle ID opened on the file or
-         directory to be closed. An asterisk ('*') is a wildcard that
-         specifies all handles.
-        :type handle_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "forceclosehandles"
-
-        # Construct URL
-        url = self.force_close_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
-                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
-                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
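The `marker`/`maxresults` contract documented in the operations above is the cursor-style pagination used throughout these generated clients: each response may carry an opaque continuation marker, and the caller loops until the service stops returning one. A minimal sketch of that loop, with a hypothetical `fetch_page` coroutine standing in for the generated `list_handles` call (the page data is made up for illustration):

```python
import asyncio

async def fetch_page(marker=None):
    # Hypothetical stand-in for the generated list_handles call:
    # returns (items, next_marker), where next_marker is None once
    # the listing is complete.
    pages = {None: (["h1", "h2"], "m1"), "m1": (["h3"], None)}
    return pages[marker]

async def list_all_handles():
    handles, marker = [], None
    while True:
        items, marker = await fetch_page(marker=marker)
        handles.extend(items)
        if not marker:  # the service omits the marker when done
            return handles

print(asyncio.run(list_all_handles()))  # ['h1', 'h2', 'h3']
```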
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,253 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ServiceOperations:
-    """ServiceOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "service".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "service"
-
-    async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs):
-        """Sets properties for a storage account's File service endpoint,
-        including properties for Storage Analytics metrics and CORS
-        (Cross-Origin Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.fileshare.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
-
-    async def get_properties(self, timeout=None, *, cls=None, **kwargs):
-        """Gets the properties of a storage account's File service, including
-        properties for Storage Analytics metrics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
-
-    async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs):
-        """The List Shares Segment operation returns a list of the shares and
-        share snapshots under the specified account.
-
-        :param prefix: Filters the results to return only entries whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.fileshare.models.ListSharesIncludeType]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListSharesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListSharesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_shares_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListSharesResponse', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_shares_segment.metadata = {'url': '/'}
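Each of these generated methods assembles its request the same way: path arguments are formatted into the account endpoint, then the fixed `restype`/`comp` discriminators plus any optional parameters go into the query string. A rough illustration of the resulting request URL for `get_properties`, under the assumption of a made-up account endpoint (the helper below is illustrative, not part of the SDK):

```python
from urllib.parse import urlencode

def build_get_properties_url(account_url, timeout=None):
    # Mirrors the query construction in get_properties above:
    # optional timeout first, then the restype/comp discriminators.
    query = {}
    if timeout is not None:
        query["timeout"] = timeout
    query["restype"] = "service"
    query["comp"] = "properties"
    return f"{account_url}/?{urlencode(query)}"

# Hypothetical endpoint, for illustration only.
print(build_get_properties_url("https://myaccount.file.core.windows.net", timeout=30))
# https://myaccount.file.core.windows.net/?timeout=30&restype=service&comp=properties
```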
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_share_operations_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_share_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_share_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/aio/operations_async/_share_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,751 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ShareOperations:
-    """ShareOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "share".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "share"
-
-    async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs):
-        """Creates a new share under the specified account. If the share with the
-        same name already exists, the operation fails.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param quota: Specifies the maximum size of the share, in gigabytes.
-        :type quota: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if quota is not None:
-            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}'}
-
-    async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs):
-        """Returns all user-defined metadata and system properties for the
-        specified share or share snapshot. The data returned does not include
-        the share's list of files.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')),
-                'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')),
-                'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')),
-                'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')),
-                'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}'}
-
-    async def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, *, cls=None, **kwargs):
-        """Operation marks the specified share or share snapshot for deletion. The
-        share or share snapshot and any files contained within it are later
-        deleted during garbage collection.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param delete_snapshots: Specify 'include' to delete the base share
-         and all of its snapshots. Possible values include: 'include'
-        :type delete_snapshots: str or
-         ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if delete_snapshots is not None:
-            header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}'}
-
-    async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs):
-        """Creates a read-only snapshot of a share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "snapshot"
-
-        # Construct URL
-        url = self.create_snapshot.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_snapshot.metadata = {'url': '/{shareName}'}
-
-    async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs):
-        """Create a permission (a security descriptor).
-
-        :param share_permission: A permission (a security descriptor) at the
-         share level.
-        :type share_permission:
-         ~azure.storage.fileshare.models.SharePermission
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "filepermission"
-
-        # Construct URL
-        url = self.create_permission.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False)
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_permission.metadata = {'url': '/{shareName}'}
-
-    async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs):
-        """Returns the permission (security descriptor) for a given key.
-
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file.
-        :type file_permission_key: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: SharePermission or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.SharePermission
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "filepermission"
-
-        # Construct URL
-        url = self.get_permission.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('SharePermission', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_permission.metadata = {'url': '/{shareName}'}
-
-    async def set_quota(self, timeout=None, quota=None, *, cls=None, **kwargs):
-        """Sets quota for the specified share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param quota: Specifies the maximum size of the share, in gigabytes.
-        :type quota: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_quota.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if quota is not None:
-            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_quota.metadata = {'url': '/{shareName}'}
-
-    async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs):
-        """Sets one or more user-defined name-value pairs for the specified share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}'}
-
-    async def get_access_policy(self, timeout=None, *, cls=None, **kwargs):
-        """Returns information about stored access policies specified on the
-        share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{shareName}'}
-
-    async def set_access_policy(self, share_acl=None, timeout=None, *, cls=None, **kwargs):
-        """Sets a stored access policy for use with shared access signatures.
-
-        :param share_acl: The ACL for the share.
-        :type share_acl:
-         list[~azure.storage.fileshare.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}}
-        if share_acl is not None:
-            body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{shareName}'}
-
-    async def get_statistics(self, timeout=None, *, cls=None, **kwargs):
-        """Retrieves statistics related to the share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ShareStats or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ShareStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ShareStats', response)
-            header_dict = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/{shareName}'}
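
The generated async operations removed above all follow one shape: format the URL, build query and header parameters, run the pipeline, map non-200 status codes to StorageErrorException, then deserialize the body and response headers. Callers are not expected to reach this layer directly; the public ShareClient wraps it. A minimal, hedged migration sketch for code that imported the deleted v2019_07_07 module, assuming the retained v2025_05_05 package mirrors the azure-storage-file-share aio surface and a real connection string is substituted:

    import asyncio

    # Assumption: the retained multiapi version re-exports the public aio client.
    from azure.multiapi.storagev2.fileshare.v2025_05_05.aio import ShareClient

    async def main():
        # Placeholder credentials; replace with a real connection string.
        client = ShareClient.from_connection_string(
            "DefaultEndpointsProtocol=https;AccountName=<name>;AccountKey=<key>",
            share_name="myshare",
        )
        async with client:
            # get_share_stats() wraps the generated get_statistics operation
            # shown above and returns the approximate share usage in bytes.
            print(await client.get_share_stats())

    asyncio.run(main())
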
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,108 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-try:
-    from ._models_py3 import AccessPolicy
-    from ._models_py3 import CopyFileSmbInfo
-    from ._models_py3 import CorsRule
-    from ._models_py3 import DirectoryItem
-    from ._models_py3 import FileHTTPHeaders
-    from ._models_py3 import FileItem
-    from ._models_py3 import FileProperty
-    from ._models_py3 import FilesAndDirectoriesListSegment
-    from ._models_py3 import HandleItem
-    from ._models_py3 import LeaseAccessConditions
-    from ._models_py3 import ListFilesAndDirectoriesSegmentResponse
-    from ._models_py3 import ListHandlesResponse
-    from ._models_py3 import ListSharesResponse
-    from ._models_py3 import Metrics
-    from ._models_py3 import Range
-    from ._models_py3 import RetentionPolicy
-    from ._models_py3 import ShareItem
-    from ._models_py3 import SharePermission
-    from ._models_py3 import ShareProperties
-    from ._models_py3 import ShareStats
-    from ._models_py3 import SignedIdentifier
-    from ._models_py3 import SourceModifiedAccessConditions
-    from ._models_py3 import StorageError, StorageErrorException
-    from ._models_py3 import StorageServiceProperties
-except (SyntaxError, ImportError):
-    from ._models import AccessPolicy
-    from ._models import CopyFileSmbInfo
-    from ._models import CorsRule
-    from ._models import DirectoryItem
-    from ._models import FileHTTPHeaders
-    from ._models import FileItem
-    from ._models import FileProperty
-    from ._models import FilesAndDirectoriesListSegment
-    from ._models import HandleItem
-    from ._models import LeaseAccessConditions
-    from ._models import ListFilesAndDirectoriesSegmentResponse
-    from ._models import ListHandlesResponse
-    from ._models import ListSharesResponse
-    from ._models import Metrics
-    from ._models import Range
-    from ._models import RetentionPolicy
-    from ._models import ShareItem
-    from ._models import SharePermission
-    from ._models import ShareProperties
-    from ._models import ShareStats
-    from ._models import SignedIdentifier
-    from ._models import SourceModifiedAccessConditions
-    from ._models import StorageError, StorageErrorException
-    from ._models import StorageServiceProperties
-from ._azure_file_storage_enums import (
-    CopyStatusType,
-    DeleteSnapshotsOptionType,
-    FileRangeWriteType,
-    LeaseDurationType,
-    LeaseStateType,
-    LeaseStatusType,
-    ListSharesIncludeType,
-    PermissionCopyModeType,
-    StorageErrorCode,
-)
-
-__all__ = [
-    'AccessPolicy',
-    'CopyFileSmbInfo',
-    'CorsRule',
-    'DirectoryItem',
-    'FileHTTPHeaders',
-    'FileItem',
-    'FileProperty',
-    'FilesAndDirectoriesListSegment',
-    'HandleItem',
-    'LeaseAccessConditions',
-    'ListFilesAndDirectoriesSegmentResponse',
-    'ListHandlesResponse',
-    'ListSharesResponse',
-    'Metrics',
-    'Range',
-    'RetentionPolicy',
-    'ShareItem',
-    'SharePermission',
-    'ShareProperties',
-    'ShareStats',
-    'SignedIdentifier',
-    'SourceModifiedAccessConditions',
-    'StorageError', 'StorageErrorException',
-    'StorageServiceProperties',
-    'StorageErrorCode',
-    'PermissionCopyModeType',
-    'DeleteSnapshotsOptionType',
-    'ListSharesIncludeType',
-    'CopyStatusType',
-    'LeaseDurationType',
-    'LeaseStateType',
-    'LeaseStatusType',
-    'FileRangeWriteType',
-]
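
The removed __init__ prefers the typed _models_py3 module and falls back to _models when the interpreter cannot parse Python 3 syntax, which is why SyntaxError is caught alongside ImportError. A runnable miniature of the same guard (typed_models is a hypothetical module name, so the fallback branch runs here):

    try:
        from typed_models import AccessPolicy        # py3-only syntax inside
    except (SyntaxError, ImportError):
        class AccessPolicy(object):                  # untyped fallback, same surface
            def __init__(self, **kwargs):
                self.permission = kwargs.get('permission', None)

    print(AccessPolicy(permission='r').permission)   # -> r
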
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_azure_file_storage_enums.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_azure_file_storage_enums.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_azure_file_storage_enums.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_azure_file_storage_enums.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,134 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-class StorageErrorCode(str, Enum):
-
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
-    client_cache_flush_delay = "ClientCacheFlushDelay"
-    delete_pending = "DeletePending"
-    directory_not_empty = "DirectoryNotEmpty"
-    file_lock_conflict = "FileLockConflict"
-    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
-    parent_not_found = "ParentNotFound"
-    read_only_attribute = "ReadOnlyAttribute"
-    share_already_exists = "ShareAlreadyExists"
-    share_being_deleted = "ShareBeingDeleted"
-    share_disabled = "ShareDisabled"
-    share_not_found = "ShareNotFound"
-    sharing_violation = "SharingViolation"
-    share_snapshot_in_progress = "ShareSnapshotInProgress"
-    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
-    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
-    share_has_snapshots = "ShareHasSnapshots"
-    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
-    authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch"
-    authorization_protocol_mismatch = "AuthorizationProtocolMismatch"
-    authorization_permission_mismatch = "AuthorizationPermissionMismatch"
-    authorization_service_mismatch = "AuthorizationServiceMismatch"
-    authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-
-
-class PermissionCopyModeType(str, Enum):
-
-    source = "source"
-    override = "override"
-
-
-class DeleteSnapshotsOptionType(str, Enum):
-
-    include = "include"
-
-
-class ListSharesIncludeType(str, Enum):
-
-    snapshots = "snapshots"
-    metadata = "metadata"
-
-
-class CopyStatusType(str, Enum):
-
-    pending = "pending"
-    success = "success"
-    aborted = "aborted"
-    failed = "failed"
-
-
-class LeaseDurationType(str, Enum):
-
-    infinite = "infinite"
-    fixed = "fixed"
-
-
-class LeaseStateType(str, Enum):
-
-    available = "available"
-    leased = "leased"
-    expired = "expired"
-    breaking = "breaking"
-    broken = "broken"
-
-
-class LeaseStatusType(str, Enum):
-
-    locked = "locked"
-    unlocked = "unlocked"
-
-
-class FileRangeWriteType(str, Enum):
-
-    update = "update"
-    clear = "clear"
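
These enums subclass both str and Enum, so members compare equal to the raw strings that appear on the wire and callers can pass either the member or the literal value. A self-contained sketch mirroring one of the deleted definitions:

    from enum import Enum

    class LeaseStateType(str, Enum):
        available = "available"
        leased = "leased"
        broken = "broken"

    assert LeaseStateType.leased == "leased"                   # str comparison works
    assert LeaseStateType("broken") is LeaseStateType.broken   # lookup by wire value
    print(LeaseStateType.leased.value)
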
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,880 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    :param start: The date-time the policy is active.
-    :type start: str
-    :param expiry: The date-time the policy expires.
-    :type expiry: str
-    :param permission: The permissions for the ACL policy.
-    :type permission: str
-    """
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.expiry = kwargs.get('expiry', None)
-        self.permission = kwargs.get('permission', None)
-
-
-class CopyFileSmbInfo(Model):
-    """Additional parameters for start_copy operation.
-
-    :param file_permission_copy_mode: Specifies the option to copy file
-     security descriptor from source file or to set it using the value which is
-     defined by the header value of x-ms-file-permission or
-     x-ms-file-permission-key. Possible values include: 'source', 'override'
-    :type file_permission_copy_mode: str or
-     ~azure.storage.fileshare.models.PermissionCopyModeType
-    :param ignore_read_only: Specifies the option to overwrite the target file
-     if it already exists and has read-only attribute set.
-    :type ignore_read_only: bool
-    :param file_attributes: Specifies either the option to copy file
-     attributes from a source file (source) to a target file or a list of
-     attributes to set on a target file.
-    :type file_attributes: str
-    :param file_creation_time: Specifies either the option to copy file
-     creation time from a source file (source) to a target file or a time value
-     in ISO 8601 format to set as creation time on a target file.
-    :type file_creation_time: str
-    :param file_last_write_time: Specifies either the option to copy file last
-     write time from a source file (source) to a target file or a time value in
-     ISO 8601 format to set as last write time on a target file.
-    :type file_last_write_time: str
-    :param set_archive_attribute: Specifies the option to set archive
-     attribute on a target file. True means archive attribute will be set on a
-     target file despite attribute overrides or a source file state.
-    :type set_archive_attribute: bool
-    """
-
-    _attribute_map = {
-        'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
-        'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
-        'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
-        'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
-        'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
-        'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CopyFileSmbInfo, self).__init__(**kwargs)
-        self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None)
-        self.ignore_read_only = kwargs.get('ignore_read_only', None)
-        self.file_attributes = kwargs.get('file_attributes', None)
-        self.file_creation_time = kwargs.get('file_creation_time', None)
-        self.file_last_write_time = kwargs.get('file_last_write_time', None)
-        self.set_archive_attribute = kwargs.get('set_archive_attribute', None)
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer.
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = kwargs.get('allowed_origins', None)
-        self.allowed_methods = kwargs.get('allowed_methods', None)
-        self.allowed_headers = kwargs.get('allowed_headers', None)
-        self.exposed_headers = kwargs.get('exposed_headers', None)
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
-
-
-class DirectoryItem(Model):
-    """A listed directory item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-    }
-    _xml_map = {
-        'name': 'Directory'
-    }
-
-    def __init__(self, **kwargs):
-        super(DirectoryItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-
-
-class FileHTTPHeaders(Model):
-    """Additional parameters for a set of operations, such as: File_create,
-    File_set_http_headers.
-
-    :param file_content_type: Sets the MIME content type of the file. The
-     default type is 'application/octet-stream'.
-    :type file_content_type: str
-    :param file_content_encoding: Specifies which content encodings have been
-     applied to the file.
-    :type file_content_encoding: str
-    :param file_content_language: Specifies the natural languages used by this
-     resource.
-    :type file_content_language: str
-    :param file_cache_control: Sets the file's cache control. The File service
-     stores this value but does not use or modify it.
-    :type file_cache_control: str
-    :param file_content_md5: Sets the file's MD5 hash.
-    :type file_content_md5: bytearray
-    :param file_content_disposition: Sets the file's Content-Disposition
-     header.
-    :type file_content_disposition: str
-    """
-
-    _attribute_map = {
-        'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}},
-        'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}},
-        'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}},
-        'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}},
-        'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}},
-        'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(FileHTTPHeaders, self).__init__(**kwargs)
-        self.file_content_type = kwargs.get('file_content_type', None)
-        self.file_content_encoding = kwargs.get('file_content_encoding', None)
-        self.file_content_language = kwargs.get('file_content_language', None)
-        self.file_cache_control = kwargs.get('file_cache_control', None)
-        self.file_content_md5 = kwargs.get('file_content_md5', None)
-        self.file_content_disposition = kwargs.get('file_content_disposition', None)
-
-
-class FileItem(Model):
-    """A listed file item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param properties: Required.
-    :type properties: ~azure.storage.fileshare.models.FileProperty
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}},
-    }
-    _xml_map = {
-        'name': 'File'
-    }
-
-    def __init__(self, **kwargs):
-        super(FileItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.properties = kwargs.get('properties', None)
-
-
-class FileProperty(Model):
-    """File properties.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param content_length: Required. Content length of the file. This value
-     may not be up-to-date since an SMB client may have modified the file
-     locally. The value of Content-Length may not reflect that fact until the
-     handle is closed or the op-lock is broken. To retrieve current property
-     values, call Get File Properties.
-    :type content_length: long
-    """
-
-    _validation = {
-        'content_length': {'required': True},
-    }
-
-    _attribute_map = {
-        'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(FileProperty, self).__init__(**kwargs)
-        self.content_length = kwargs.get('content_length', None)
-
-
-class FilesAndDirectoriesListSegment(Model):
-    """Abstract for entries that can be listed from Directory.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param directory_items: Required.
-    :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
-    :param file_items: Required.
-    :type file_items: list[~azure.storage.fileshare.models.FileItem]
-    """
-
-    _validation = {
-        'directory_items': {'required': True},
-        'file_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}},
-        'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}},
-    }
-    _xml_map = {
-        'name': 'Entries'
-    }
-
-    def __init__(self, **kwargs):
-        super(FilesAndDirectoriesListSegment, self).__init__(**kwargs)
-        self.directory_items = kwargs.get('directory_items', None)
-        self.file_items = kwargs.get('file_items', None)
-
-
-class HandleItem(Model):
-    """A listed Azure Storage handle item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param handle_id: Required. XSMB service handle ID
-    :type handle_id: str
-    :param path: Required. File or directory name including full path starting
-     from share root
-    :type path: str
-    :param file_id: Required. FileId uniquely identifies the file or
-     directory.
-    :type file_id: str
-    :param parent_id: ParentId uniquely identifies the parent directory of the
-     object.
-    :type parent_id: str
-    :param session_id: Required. SMB session ID in context of which the file
-     handle was opened
-    :type session_id: str
-    :param client_ip: Required. Client IP that opened the handle
-    :type client_ip: str
-    :param open_time: Required. Time when the session that previously opened
-     the handle has last been reconnected. (UTC)
-    :type open_time: datetime
-    :param last_reconnect_time: Time the handle was last reconnected (UTC)
-    :type last_reconnect_time: datetime
-    """
-
-    _validation = {
-        'handle_id': {'required': True},
-        'path': {'required': True},
-        'file_id': {'required': True},
-        'session_id': {'required': True},
-        'client_ip': {'required': True},
-        'open_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}},
-        'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}},
-        'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}},
-        'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}},
-        'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}},
-        'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}},
-        'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}},
-        'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}},
-    }
-    _xml_map = {
-        'name': 'Handle'
-    }
-
-    def __init__(self, **kwargs):
-        super(HandleItem, self).__init__(**kwargs)
-        self.handle_id = kwargs.get('handle_id', None)
-        self.path = kwargs.get('path', None)
-        self.file_id = kwargs.get('file_id', None)
-        self.parent_id = kwargs.get('parent_id', None)
-        self.session_id = kwargs.get('session_id', None)
-        self.client_ip = kwargs.get('client_ip', None)
-        self.open_time = kwargs.get('open_time', None)
-        self.last_reconnect_time = kwargs.get('last_reconnect_time', None)
-
-
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = kwargs.get('lease_id', None)
-
-
-class ListFilesAndDirectoriesSegmentResponse(Model):
-    """An enumeration of directories and files.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param share_name: Required.
-    :type share_name: str
-    :param share_snapshot:
-    :type share_snapshot: str
-    :param directory_path: Required.
-    :type directory_path: str
-    :param prefix: Required.
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param segment: Required.
-    :type segment:
-     ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'share_name': {'required': True},
-        'directory_path': {'required': True},
-        'prefix': {'required': True},
-        'segment': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}},
-        'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}},
-        'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.share_name = kwargs.get('share_name', None)
-        self.share_snapshot = kwargs.get('share_snapshot', None)
-        self.directory_path = kwargs.get('directory_path', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.segment = kwargs.get('segment', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class ListHandlesResponse(Model):
-    """An enumeration of handles.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param handle_list:
-    :type handle_list: list[~azure.storage.fileshare.models.HandleItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListHandlesResponse, self).__init__(**kwargs)
-        self.handle_list = kwargs.get('handle_list', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class ListSharesResponse(Model):
-    """An enumeration of shares.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param share_items:
-    :type share_items: list[~azure.storage.fileshare.models.ShareItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListSharesResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.share_items = kwargs.get('share_items', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class Metrics(Model):
-    """Storage Analytics metrics for file service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     File service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Metrics, self).__init__(**kwargs)
-        self.version = kwargs.get('version', None)
-        self.enabled = kwargs.get('enabled', None)
-        self.include_apis = kwargs.get('include_apis', None)
-        self.retention_policy = kwargs.get('retention_policy', None)
-
-
-class Range(Model):
-    """An Azure Storage file range.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. Start of the range.
-    :type start: long
-    :param end: Required. End of the range.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'Range'
-    }
-
-    def __init__(self, **kwargs):
-        super(Range, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.end = kwargs.get('end', None)
-
-
-class RetentionPolicy(Model):
-    """The retention policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the File service. If false, metrics data is retained, and the user is
-     responsible for deleting it.
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics data should be
-     retained. All data older than this value will be deleted. Metrics data is
-     deleted on a best-effort basis after the retention period expires.
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'maximum': 365, 'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = kwargs.get('enabled', None)
-        self.days = kwargs.get('days', None)
-
-
-class ShareItem(Model):
-    """A listed Azure Storage share item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param snapshot:
-    :type snapshot: str
-    :param properties: Required.
-    :type properties: ~azure.storage.fileshare.models.ShareProperties
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
-        'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Share'
-    }
-
-    def __init__(self, **kwargs):
-        super(ShareItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.snapshot = kwargs.get('snapshot', None)
-        self.properties = kwargs.get('properties', None)
-        self.metadata = kwargs.get('metadata', None)
-
-
-class SharePermission(Model):
-    """A permission (a security descriptor) at the share level.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param permission: Required. The permission in the Security Descriptor
-     Definition Language (SDDL).
-    :type permission: str
-    """
-
-    _validation = {
-        'permission': {'required': True},
-    }
-
-    _attribute_map = {
-        'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SharePermission, self).__init__(**kwargs)
-        self.permission = kwargs.get('permission', None)
-
-
-class ShareProperties(Model):
-    """Properties of a share.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param quota: Required.
-    :type quota: int
-    :param provisioned_iops:
-    :type provisioned_iops: int
-    :param provisioned_ingress_mbps:
-    :type provisioned_ingress_mbps: int
-    :param provisioned_egress_mbps:
-    :type provisioned_egress_mbps: int
-    :param next_allowed_quota_downgrade_time:
-    :type next_allowed_quota_downgrade_time: datetime
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-        'quota': {'required': True},
-    }
-
-    _attribute_map = {
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}},
-        'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}},
-        'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}},
-        'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}},
-        'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(ShareProperties, self).__init__(**kwargs)
-        self.last_modified = kwargs.get('last_modified', None)
-        self.etag = kwargs.get('etag', None)
-        self.quota = kwargs.get('quota', None)
-        self.provisioned_iops = kwargs.get('provisioned_iops', None)
-        self.provisioned_ingress_mbps = kwargs.get('provisioned_ingress_mbps', None)
-        self.provisioned_egress_mbps = kwargs.get('provisioned_egress_mbps', None)
-        self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None)
-
-
-class ShareStats(Model):
-    """Stats for the share.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param share_usage_bytes: Required. The approximate size of the data
-     stored in bytes. Note that this value may not include all recently created
-     or recently resized files.
-    :type share_usage_bytes: int
-    """
-
-    _validation = {
-        'share_usage_bytes': {'required': True},
-    }
-
-    _attribute_map = {
-        'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(ShareStats, self).__init__(**kwargs)
-        self.share_usage_bytes = kwargs.get('share_usage_bytes', None)
-
-
-class SignedIdentifier(Model):
-    """Signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. A unique id.
-    :type id: str
-    :param access_policy: The access policy.
-    :type access_policy: ~azure.storage.fileshare.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = kwargs.get('id', None)
-        self.access_policy = kwargs.get('access_policy', None)
-
-
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for upload_range_from_url operation.
-
-    :param source_if_match_crc64: Specify the crc64 value to operate only on
-     range with a matching crc64 checksum.
-    :type source_if_match_crc64: bytearray
-    :param source_if_none_match_crc64: Specify the crc64 value to operate only
-     on range without a matching crc64 checksum.
-    :type source_if_none_match_crc64: bytearray
-    """
-
-    _attribute_map = {
-        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
-        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None)
-        self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None)
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageError, self).__init__(**kwargs)
-        self.message = kwargs.get('message', None)
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage service properties.
-
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for files.
-    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for files.
-    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.fileshare.models.CorsRule]
-    """
-
-    _attribute_map = {
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.hour_metrics = kwargs.get('hour_metrics', None)
-        self.minute_metrics = kwargs.get('minute_metrics', None)
-        self.cors = kwargs.get('cors', None)
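
Every model deleted above follows the same msrest pattern: _attribute_map maps Python attribute names to wire names (with optional XML hints) and __init__ copies optional kwargs onto the instance. A hedged, self-contained sketch of that pattern, assuming only that msrest is installed (AccessPolicyDemo is an illustrative name, not part of the package):

    from msrest.serialization import Model

    class AccessPolicyDemo(Model):
        _attribute_map = {
            'start': {'key': 'Start', 'type': 'str'},
            'expiry': {'key': 'Expiry', 'type': 'str'},
            'permission': {'key': 'Permission', 'type': 'str'},
        }

        def __init__(self, **kwargs):
            super(AccessPolicyDemo, self).__init__(**kwargs)
            self.start = kwargs.get('start', None)
            self.expiry = kwargs.get('expiry', None)
            self.permission = kwargs.get('permission', None)

    policy = AccessPolicyDemo(start='2019-01-01T00:00:00Z', permission='rwd')
    print(policy.serialize())   # None-valued attributes are omitted from the output
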
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models_py3.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models_py3.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models_py3.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/models/_models_py3.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,880 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    :param start: The date-time the policy is active.
-    :type start: str
-    :param expiry: The date-time the policy expires.
-    :type expiry: str
-    :param permission: The permissions for the ACL policy.
-    :type permission: str
-    """
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None:
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class CopyFileSmbInfo(Model):
-    """Additional parameters for start_copy operation.
-
-    :param file_permission_copy_mode: Specifies the option to copy file
-     security descriptor from source file or to set it using the value which is
-     defined by the header value of x-ms-file-permission or
-     x-ms-file-permission-key. Possible values include: 'source', 'override'
-    :type file_permission_copy_mode: str or
-     ~azure.storage.fileshare.models.PermissionCopyModeType
-    :param ignore_read_only: Specifies the option to overwrite the target file
-     if it already exists and has read-only attribute set.
-    :type ignore_read_only: bool
-    :param file_attributes: Specifies either the option to copy file
-     attributes from a source file (source) to a target file or a list of
-     attributes to set on a target file.
-    :type file_attributes: str
-    :param file_creation_time: Specifies either the option to copy file
-     creation time from a source file (source) to a target file or a time value
-     in ISO 8601 format to set as creation time on a target file.
-    :type file_creation_time: str
-    :param file_last_write_time: Specifies either the option to copy file last
-     write time from a source file (source) to a target file or a time value in
-     ISO 8601 format to set as last write time on a target file.
-    :type file_last_write_time: str
-    :param set_archive_attribute: Specifies the option to set archive
-     attribute on a target file. True means archive attribute will be set on a
-     target file despite attribute overrides or a source file state.
-    :type set_archive_attribute: bool
-    """
-
-    _attribute_map = {
-        'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
-        'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
-        'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
-        'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
-        'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
-        'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None:
-        super(CopyFileSmbInfo, self).__init__(**kwargs)
-        self.file_permission_copy_mode = file_permission_copy_mode
-        self.ignore_read_only = ignore_read_only
-        self.file_attributes = file_attributes
-        self.file_creation_time = file_creation_time
-        self.file_last_write_time = file_last_write_time
-        self.set_archive_attribute = set_archive_attribute
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer.
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = allowed_origins
-        self.allowed_methods = allowed_methods
-        self.allowed_headers = allowed_headers
-        self.exposed_headers = exposed_headers
-        self.max_age_in_seconds = max_age_in_seconds
-
-
-class DirectoryItem(Model):
-    """A listed directory item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-    }
-    _xml_map = {
-        'name': 'Directory'
-    }
-
-    def __init__(self, *, name: str, **kwargs) -> None:
-        super(DirectoryItem, self).__init__(**kwargs)
-        self.name = name
-
-
-class FileHTTPHeaders(Model):
-    """Additional parameters for a set of operations, such as: File_create,
-    File_set_http_headers.
-
-    :param file_content_type: Sets the MIME content type of the file. The
-     default type is 'application/octet-stream'.
-    :type file_content_type: str
-    :param file_content_encoding: Specifies which content encodings have been
-     applied to the file.
-    :type file_content_encoding: str
-    :param file_content_language: Specifies the natural languages used by this
-     resource.
-    :type file_content_language: str
-    :param file_cache_control: Sets the file's cache control. The File service
-     stores this value but does not use or modify it.
-    :type file_cache_control: str
-    :param file_content_md5: Sets the file's MD5 hash.
-    :type file_content_md5: bytearray
-    :param file_content_disposition: Sets the file's Content-Disposition
-     header.
-    :type file_content_disposition: str
-    """
-
-    _attribute_map = {
-        'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}},
-        'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}},
-        'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}},
-        'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}},
-        'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}},
-        'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None:
-        super(FileHTTPHeaders, self).__init__(**kwargs)
-        self.file_content_type = file_content_type
-        self.file_content_encoding = file_content_encoding
-        self.file_content_language = file_content_language
-        self.file_cache_control = file_cache_control
-        self.file_content_md5 = file_content_md5
-        self.file_content_disposition = file_content_disposition
-
-
-class FileItem(Model):
-    """A listed file item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param properties: Required.
-    :type properties: ~azure.storage.fileshare.models.FileProperty
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}},
-    }
-    _xml_map = {
-        'name': 'File'
-    }
-
-    def __init__(self, *, name: str, properties, **kwargs) -> None:
-        super(FileItem, self).__init__(**kwargs)
-        self.name = name
-        self.properties = properties
-
-
-class FileProperty(Model):
-    """File properties.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param content_length: Required. Content length of the file. This value
-     may not be up-to-date since an SMB client may have modified the file
-     locally. The value of Content-Length may not reflect that fact until the
-     handle is closed or the op-lock is broken. To retrieve current property
-     values, call Get File Properties.
-    :type content_length: long
-    """
-
-    _validation = {
-        'content_length': {'required': True},
-    }
-
-    _attribute_map = {
-        'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, content_length: int, **kwargs) -> None:
-        super(FileProperty, self).__init__(**kwargs)
-        self.content_length = content_length
-
-
-class FilesAndDirectoriesListSegment(Model):
-    """Abstract for entries that can be listed from Directory.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param directory_items: Required.
-    :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
-    :param file_items: Required.
-    :type file_items: list[~azure.storage.fileshare.models.FileItem]
-    """
-
-    _validation = {
-        'directory_items': {'required': True},
-        'file_items': {'required': True},
-    }
-
-    _attribute_map = {
-        'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}},
-        'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}},
-    }
-    _xml_map = {
-        'name': 'Entries'
-    }
-
-    def __init__(self, *, directory_items, file_items, **kwargs) -> None:
-        super(FilesAndDirectoriesListSegment, self).__init__(**kwargs)
-        self.directory_items = directory_items
-        self.file_items = file_items
-
-
-class HandleItem(Model):
-    """A listed Azure Storage handle item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param handle_id: Required. XSMB service handle ID
-    :type handle_id: str
-    :param path: Required. File or directory name including full path starting
-     from share root
-    :type path: str
-    :param file_id: Required. FileId uniquely identifies the file or
-     directory.
-    :type file_id: str
-    :param parent_id: ParentId uniquely identifies the parent directory of the
-     object.
-    :type parent_id: str
-    :param session_id: Required. SMB session ID in context of which the file
-     handle was opened
-    :type session_id: str
-    :param client_ip: Required. Client IP that opened the handle
-    :type client_ip: str
-    :param open_time: Required. Time when the session that previously opened
-     the handle has last been reconnected. (UTC)
-    :type open_time: datetime
-    :param last_reconnect_time: Time handle was last connected to (UTC)
-    :type last_reconnect_time: datetime
-    """
-
-    _validation = {
-        'handle_id': {'required': True},
-        'path': {'required': True},
-        'file_id': {'required': True},
-        'session_id': {'required': True},
-        'client_ip': {'required': True},
-        'open_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}},
-        'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}},
-        'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}},
-        'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}},
-        'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}},
-        'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}},
-        'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}},
-        'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}},
-    }
-    _xml_map = {
-        'name': 'Handle'
-    }
-
-    def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None:
-        super(HandleItem, self).__init__(**kwargs)
-        self.handle_id = handle_id
-        self.path = path
-        self.file_id = file_id
-        self.parent_id = parent_id
-        self.session_id = session_id
-        self.client_ip = client_ip
-        self.open_time = open_time
-        self.last_reconnect_time = last_reconnect_time
-
-
-class LeaseAccessConditions(Model):
-    """Additional parameters for a set of operations.
-
-    :param lease_id: If specified, the operation only succeeds if the
-     resource's lease is active and matches this ID.
-    :type lease_id: str
-    """
-
-    _attribute_map = {
-        'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, lease_id: str=None, **kwargs) -> None:
-        super(LeaseAccessConditions, self).__init__(**kwargs)
-        self.lease_id = lease_id
-
-
-class ListFilesAndDirectoriesSegmentResponse(Model):
-    """An enumeration of directories and files.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param share_name: Required.
-    :type share_name: str
-    :param share_snapshot:
-    :type share_snapshot: str
-    :param directory_path: Required.
-    :type directory_path: str
-    :param prefix: Required.
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param segment: Required.
-    :type segment:
-     ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'share_name': {'required': True},
-        'directory_path': {'required': True},
-        'prefix': {'required': True},
-        'segment': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}},
-        'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}},
-        'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None:
-        super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.share_name = share_name
-        self.share_snapshot = share_snapshot
-        self.directory_path = directory_path
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.segment = segment
-        self.next_marker = next_marker
-
-
-class ListHandlesResponse(Model):
-    """An enumeration of handles.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param handle_list:
-    :type handle_list: list[~azure.storage.fileshare.models.HandleItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None:
-        super(ListHandlesResponse, self).__init__(**kwargs)
-        self.handle_list = handle_list
-        self.next_marker = next_marker
-
-
-class ListSharesResponse(Model):
-    """An enumeration of shares.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix:
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results:
-    :type max_results: int
-    :param share_items:
-    :type share_items: list[~azure.storage.fileshare.models.ShareItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None:
-        super(ListSharesResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.share_items = share_items
-        self.next_marker = next_marker
-
-
-class Metrics(Model):
-    """Storage Analytics metrics for file service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     File service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
-        super(Metrics, self).__init__(**kwargs)
-        self.version = version
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy
-
-
-class Range(Model):
-    """An Azure Storage file range.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. Start of the range.
-    :type start: long
-    :param end: Required. End of the range.
-    :type end: long
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'end': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
-        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
-    }
-    _xml_map = {
-        'name': 'Range'
-    }
-
-    def __init__(self, *, start: int, end: int, **kwargs) -> None:
-        super(Range, self).__init__(**kwargs)
-        self.start = start
-        self.end = end
-
-
-class RetentionPolicy(Model):
-    """The retention policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the File service. If false, metrics data is retained, and the user is
-     responsible for deleting it.
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics data should be
-     retained. All data older than this value will be deleted. Metrics data is
-     deleted on a best-effort basis after the retention period expires.
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'maximum': 365, 'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = enabled
-        self.days = days
-
-
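The _validation table above bounds days to 1-365, but msrest models do not validate in __init__; constraints are checked only by a serializer with client-side validation enabled. A hedged sketch using the classes defined in this file:

    # Metrics and RetentionPolicy are the model classes defined above.
    weekly = RetentionPolicy(enabled=True, days=7)   # within the 1..365 bound
    hourly = Metrics(version='1.0', enabled=True, include_apis=True,
                     retention_policy=weekly)
    # RetentionPolicy(enabled=True, days=400) would also construct without
    # error and be rejected only by an msrest Serializer that validates
    # client-side.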
-class ShareItem(Model):
-    """A listed Azure Storage share item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param snapshot:
-    :type snapshot: str
-    :param properties: Required.
-    :type properties: ~azure.storage.fileshare.models.ShareProperties
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-        'properties': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
-        'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Share'
-    }
-
-    def __init__(self, *, name: str, properties, snapshot: str=None, metadata=None, **kwargs) -> None:
-        super(ShareItem, self).__init__(**kwargs)
-        self.name = name
-        self.snapshot = snapshot
-        self.properties = properties
-        self.metadata = metadata
-
-
-class SharePermission(Model):
-    """A permission (a security descriptor) at the share level.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param permission: Required. The permission in the Security Descriptor
-     Definition Language (SDDL).
-    :type permission: str
-    """
-
-    _validation = {
-        'permission': {'required': True},
-    }
-
-    _attribute_map = {
-        'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, permission: str, **kwargs) -> None:
-        super(SharePermission, self).__init__(**kwargs)
-        self.permission = permission
-
-
-class ShareProperties(Model):
-    """Properties of a share.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param last_modified: Required.
-    :type last_modified: datetime
-    :param etag: Required.
-    :type etag: str
-    :param quota: Required.
-    :type quota: int
-    :param provisioned_iops:
-    :type provisioned_iops: int
-    :param provisioned_ingress_mbps:
-    :type provisioned_ingress_mbps: int
-    :param provisioned_egress_mbps:
-    :type provisioned_egress_mbps: int
-    :param next_allowed_quota_downgrade_time:
-    :type next_allowed_quota_downgrade_time: datetime
-    """
-
-    _validation = {
-        'last_modified': {'required': True},
-        'etag': {'required': True},
-        'quota': {'required': True},
-    }
-
-    _attribute_map = {
-        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
-        'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
-        'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}},
-        'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}},
-        'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}},
-        'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}},
-        'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, **kwargs) -> None:
-        super(ShareProperties, self).__init__(**kwargs)
-        self.last_modified = last_modified
-        self.etag = etag
-        self.quota = quota
-        self.provisioned_iops = provisioned_iops
-        self.provisioned_ingress_mbps = provisioned_ingress_mbps
-        self.provisioned_egress_mbps = provisioned_egress_mbps
-        self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
-
-
-class ShareStats(Model):
-    """Stats for the share.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param share_usage_bytes: Required. The approximate size of the data
-     stored in bytes. Note that this value may not include all recently created
-     or recently resized files.
-    :type share_usage_bytes: int
-    """
-
-    _validation = {
-        'share_usage_bytes': {'required': True},
-    }
-
-    _attribute_map = {
-        'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, share_usage_bytes: int, **kwargs) -> None:
-        super(ShareStats, self).__init__(**kwargs)
-        self.share_usage_bytes = share_usage_bytes
-
-
-class SignedIdentifier(Model):
-    """Signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. A unique id.
-    :type id: str
-    :param access_policy: The access policy.
-    :type access_policy: ~azure.storage.fileshare.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = id
-        self.access_policy = access_policy
-
-
-class SourceModifiedAccessConditions(Model):
-    """Additional parameters for upload_range_from_url operation.
-
-    :param source_if_match_crc64: Specify the crc64 value to operate only on
-     range with a matching crc64 checksum.
-    :type source_if_match_crc64: bytearray
-    :param source_if_none_match_crc64: Specify the crc64 value to operate only
-     on range without a matching crc64 checksum.
-    :type source_if_none_match_crc64: bytearray
-    """
-
-    _attribute_map = {
-        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
-        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None:
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match_crc64 = source_if_match_crc64
-        self.source_if_none_match_crc64 = source_if_none_match_crc64
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, message: str=None, **kwargs) -> None:
-        super(StorageError, self).__init__(**kwargs)
-        self.message = message
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
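Because StorageErrorException subclasses HttpResponseError, callers can catch either type and read the deserialized body from .error. A hedged sketch, where directory_ops stands in for a fully wired DirectoryOperations instance (see the operations hunks below):

    from azure.core.exceptions import HttpResponseError

    try:
        directory_ops.create()                    # hypothetical wired instance
    except HttpResponseError as exc:              # StorageErrorException subclasses this
        detail = getattr(exc, 'error', None)      # deserialized StorageError, if any
        print('create failed:', getattr(detail, 'message', None) or exc)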
-
-class StorageServiceProperties(Model):
-    """Storage service properties.
-
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for files.
-    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for files.
-    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.fileshare.models.CorsRule]
-    """
-
-    _attribute_map = {
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None:
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.hour_metrics = hour_metrics
-        self.minute_metrics = minute_metrics
-        self.cors = cors
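Every model in this file carries _attribute_map/_xml_map metadata, so msrest can render request bodies directly. A serialization sketch, assuming msrest is installed (it was a dependency of this generated code) and the hypothetical import path used earlier:

    from msrest import Serializer

    from azure.multiapi.storagev2.fileshare.v2019_07_07._generated.models import (
        Metrics, RetentionPolicy, StorageServiceProperties,
    )

    props = StorageServiceProperties(
        hour_metrics=Metrics(version='1.0', enabled=True, include_apis=True,
                             retention_policy=RetentionPolicy(enabled=True, days=7)),
    )
    serializer = Serializer({
        'StorageServiceProperties': StorageServiceProperties,
        'Metrics': Metrics,
        'RetentionPolicy': RetentionPolicy,
    })
    # is_xml=True yields an xml.etree.ElementTree.Element shaped by the
    # models' XML maps, ready to be sent as a request body.
    body = serializer.body(props, 'StorageServiceProperties', is_xml=True)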
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations import ServiceOperations
-from ._share_operations import ShareOperations
-from ._directory_operations import DirectoryOperations
-from ._file_operations import FileOperations
-
-__all__ = [
-    'ServiceOperations',
-    'ShareOperations',
-    'DirectoryOperations',
-    'FileOperations',
-]
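The four groups removed here were attached as attributes of the generated client rather than instantiated directly. A hedged sketch of that wiring in <= 1.4.0; AzureFileStorage is an assumed client name:

    # Assumed client class and constructor; the operation groups hang off it
    # as attributes, per the generated-client convention.
    from azure.multiapi.storagev2.fileshare.v2019_07_07._generated import AzureFileStorage

    client = AzureFileStorage(url='https://myaccount.file.core.windows.net/myshare/mydir')
    client.directory.create()   # DirectoryOperations, created for you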
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_directory_operations.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_directory_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_directory_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_directory_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,672 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class DirectoryOperations(object):
-    """DirectoryOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "directory".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "directory"
-
-    def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs):
-        """Creates a new directory under the specified share or parent directory.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8KB; otherwise the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have owner, group,
-         and dacl. Note: Only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}/{directory}'}
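On the wire, create() amounts to a PUT with restype=directory plus the x-ms-file-* headers built above, expecting 201. A hedged reconstruction with the requests library; the account, share, and auth values are placeholders:

    import requests

    url = 'https://myaccount.file.core.windows.net/myshare/mydir'
    resp = requests.put(
        url,
        params={'restype': 'directory'},          # add SAS query parameters here
        headers={
            'x-ms-version': '2019-07-07',
            'x-ms-file-attributes': 'none',
            'x-ms-file-creation-time': 'now',
            'x-ms-file-last-write-time': 'now',
            'x-ms-file-permission': 'inherit',
        },
    )
    assert resp.status_code == 201                # the only success code accepted above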
-
-    def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs):
-        """Returns all system properties for the specified directory, and can also
-        be used to check the existence of a directory. The data returned does
-        not include the files in the directory or any subdirectories.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}/{directory}'}
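As the docstring notes, get_properties doubles as an existence probe: any failure surfaces as the HttpResponseError-derived StorageErrorException. A hedged helper around a hypothetical wired instance:

    from azure.core.exceptions import HttpResponseError

    def directory_exists(directory_ops):
        """Return True if get_properties succeeds, False on any service error."""
        try:
            directory_ops.get_properties()
            return True
        except HttpResponseError:
            return False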
-
-    def delete(self, timeout=None, cls=None, **kwargs):
-        """Removes the specified empty directory. Note that the directory must be
-        empty before it can be deleted.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}/{directory}'}
-
-    def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs):
-        """Sets properties on the directory.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8KB; otherwise the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have owner, group,
-         and dacl. Note: Only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/{shareName}/{directory}'}
-
-    def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs):
-        """Updates user defined metadata for the specified directory.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}/{directory}'}
-
-    def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs):
-        """Returns a list of files or directories under the specified share or
-        directory. It lists the contents only for a single level of the
-        directory hierarchy.
-
-        :param prefix: Filters the results to return only entries whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListFilesAndDirectoriesSegmentResponse or the result of
-         cls(response)
-        :rtype:
-         ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_files_and_directories_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'}
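The marker/NextMarker contract described above implies the usual pagination loop: feed each response's next_marker back in until it comes back empty. A hedged sketch with a hypothetical dir_ops instance:

    entries, marker = [], None
    while True:
        page = dir_ops.list_files_and_directories_segment(marker=marker, maxresults=1000)
        entries.extend(page.segment.directory_items)   # DirectoryItem models
        entries.extend(page.segment.file_items)        # FileItem models
        if not page.next_marker:                       # empty marker ends the listing
            break
        marker = page.next_marker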
-
-    def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
-        """Lists handles for directory.
-
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param recursive: Specifies whether the operation should apply to the
-         directory specified in the URI, its files, its subdirectories, and
-         their files.
-        :type recursive: bool
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListHandlesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "listhandles"
-
-        # Construct URL
-        url = self.list_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        if recursive is not None:
-            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListHandlesResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_handles.metadata = {'url': '/{shareName}/{directory}'}
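-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): paging through open handles by following the continuation
-    # marker. The ListHandlesResponse field names `handle_list` and
-    # `next_marker` are assumptions for illustration; `dir_ops` is an
-    # assumed, initialized DirectoryOperations instance.
-    #
-    #     marker = None
-    #     while True:
-    #         result = dir_ops.list_handles(marker=marker, recursive=True)
-    #         for handle in result.handle_list:      # assumed field name
-    #             print(handle.handle_id)
-    #         marker = result.next_marker            # assumed field name
-    #         if not marker:
-    #             break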
-
-    def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
-        """Closes all handles open for the given directory.
-
-        :param handle_id: Specifies the handle ID opened on the file or
-         directory to be closed. An asterisk (‘*’) is a wildcard that
-         specifies all handles.
-        :type handle_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param recursive: Specifies that the operation should apply to the
-         directory specified in the URI, its files, its subdirectories, and
-         their files.
-        :type recursive: bool
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "forceclosehandles"
-
-        # Construct URL
-        url = self.force_close_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
-        if recursive is not None:
-            header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
-                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
-                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    force_close_handles.metadata = {'url': '/{shareName}/{directory}'}
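-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): closing every open handle under a directory. The '*' wildcard
-    # and the 'x-ms-marker' continuation header come from the operation
-    # above; `dir_ops` is an assumed, initialized DirectoryOperations
-    # instance.
-    #
-    #     def close_all_handles(dir_ops):
-    #         marker = None
-    #         while True:
-    #             headers = {}
-    #             dir_ops.force_close_handles(
-    #                 handle_id='*', marker=marker, recursive=True,
-    #                 cls=lambda resp, body, hdrs: headers.update(hdrs))
-    #             marker = headers.get('x-ms-marker')
-    #             if not marker:
-    #                 break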
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_file_operations.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_file_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_file_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_file_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1665 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class FileOperations(object):
-    """FileOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar x_ms_type: Dummy constant parameter; the file type can only be "file". Constant value: "file".
-    :ivar x_ms_copy_action: Constant value: "abort".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.x_ms_type = "file"
-        self.x_ms_copy_action = "abort"
-
-    def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Creates a new file or replaces a file. Note that this only
-        initializes the file; it does not write any content.
-
-        :param file_content_length: Specifies the maximum size for the file,
-         up to 1 TB.
-        :type file_content_length: long
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as the default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8 KB; otherwise, the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have owner, group,
-         and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param file_http_headers: Additional parameters for the operation
-        :type file_http_headers:
-         ~azure.storage.fileshare.models.FileHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_content_type = None
-        if file_http_headers is not None:
-            file_content_type = file_http_headers.file_content_type
-        file_content_encoding = None
-        if file_http_headers is not None:
-            file_content_encoding = file_http_headers.file_content_encoding
-        file_content_language = None
-        if file_http_headers is not None:
-            file_content_language = file_http_headers.file_content_language
-        file_cache_control = None
-        if file_http_headers is not None:
-            file_cache_control = file_http_headers.file_cache_control
-        file_content_md5 = None
-        if file_http_headers is not None:
-            file_content_md5 = file_http_headers.file_content_md5
-        file_content_disposition = None
-        if file_http_headers is not None:
-            file_content_disposition = file_http_headers.file_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long')
-        header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if file_content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str')
-        if file_content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str')
-        if file_content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str')
-        if file_cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str')
-        if file_content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray')
-        if file_content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
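-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): creating an empty 1 KiB file with a content type and
-    # metadata. The FileHTTPHeaders keyword argument is an assumption based
-    # on the attribute read above; `file_ops` is an assumed, initialized
-    # FileOperations instance.
-    #
-    #     headers = models.FileHTTPHeaders(file_content_type='text/plain')
-    #     file_ops.create(file_content_length=1024,
-    #                     metadata='purpose=demo',   # sent as x-ms-meta
-    #                     file_http_headers=headers)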
-
-    def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Reads or downloads a file from the system, including its metadata and
-        properties.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param range: Return file data only from the specified byte range.
-        :type range: str
-        :param range_get_content_md5: When this header is set to true and
-         specified together with the Range header, the service returns the MD5
-         hash for the range, as long as the range is less than or equal to 4 MB
-         in size.
-        :type range_get_content_md5: bool
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: object or the result of cls(response)
-        :rtype: Generator
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.download.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if range_get_content_md5 is not None:
-            header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 206]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-        if response.status_code == 206:
-            deserialized = response.stream_download(self._client._pipeline)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    download.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
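-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): downloading the first 4 MiB of a file. The return value is
-    # the object from response.stream_download(); iterating it in chunks is
-    # an assumption about the azure-core stream type.
-    #
-    #     stream = file_ops.download(range='bytes=0-4194303',
-    #                                range_get_content_md5=True)
-    #     with open('out.bin', 'wb') as fh:
-    #         for chunk in stream:
-    #             fh.write(chunk)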
-
-    def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Returns all user-defined metadata, standard HTTP properties, and system
-        properties for the file. It does not return the content of the file.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.head(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')),
-                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
-                'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
-                'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
-                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
-                'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
-                'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
-                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
-                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
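-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): get_properties returns None unless `cls` is supplied, so a
-    # caller that wants the deserialized headers can pass a trivial callback
-    # that returns the header dict built above.
-    #
-    #     props = file_ops.get_properties(cls=lambda resp, body, hdrs: hdrs)
-    #     print(props['Content-Length'], props['ETag'])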
-
-    def delete(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Removes the file from the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Sets HTTP headers on the file.
-
-        :param file_attributes: If specified, the provided file attributes
-         shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
-         directory. ‘None’ can also be specified as the default.
-        :type file_attributes: str
-        :param file_creation_time: Creation time for the file/directory.
-         Default value: Now.
-        :type file_creation_time: str
-        :param file_last_write_time: Last write time for the file/directory.
-         Default value: Now.
-        :type file_last_write_time: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param file_content_length: Resizes a file to the specified size. If
-         the specified byte value is less than the current size of the file,
-         then all ranges above the specified byte value are cleared.
-        :type file_content_length: long
-        :param file_permission: If specified, the permission (security
-         descriptor) shall be set for the directory/file. This header can be
-         used if the permission size is <= 8 KB; otherwise, the
-         x-ms-file-permission-key header shall be used. Default value:
-         Inherit. If SDDL is specified as input, it must have owner, group,
-         and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param file_http_headers: Additional parameters for the operation
-        :type file_http_headers:
-         ~azure.storage.fileshare.models.FileHTTPHeaders
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_content_type = None
-        if file_http_headers is not None:
-            file_content_type = file_http_headers.file_content_type
-        file_content_encoding = None
-        if file_http_headers is not None:
-            file_content_encoding = file_http_headers.file_content_encoding
-        file_content_language = None
-        if file_http_headers is not None:
-            file_content_language = file_http_headers.file_content_language
-        file_cache_control = None
-        if file_http_headers is not None:
-            file_cache_control = file_http_headers.file_cache_control
-        file_content_md5 = None
-        if file_http_headers is not None:
-            file_content_md5 = file_http_headers.file_content_md5
-        file_content_disposition = None
-        if file_http_headers is not None:
-            file_content_disposition = file_http_headers.file_content_disposition
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_http_headers.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if file_content_length is not None:
-            header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if file_content_type is not None:
-            header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str')
-        if file_content_encoding is not None:
-            header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str')
-        if file_content_language is not None:
-            header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str')
-        if file_cache_control is not None:
-            header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str')
-        if file_content_md5 is not None:
-            header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray')
-        if file_content_disposition is not None:
-            header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')),
-                'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')),
-                'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')),
-                'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')),
-                'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')),
-                'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
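-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): shrinking a file to 512 bytes. Per the docstring above,
-    # passing file_content_length resizes the file and clears all ranges
-    # beyond the new size.
-    #
-    #     file_ops.set_http_headers(file_content_length=512)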
-
-    def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Updates user-defined metadata for the specified file.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param duration: Specifies the duration of the lease, in seconds, or
-         negative one (-1) for a lease that never expires. A non-infinite lease
-         can be between 15 and 60 seconds. A lease duration cannot be changed
-         using renew or change.
-        :type duration: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The File service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "acquire"
-
-        # Construct URL
-        url = self.acquire_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if duration is not None:
-            header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
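-
-    # Hedged usage sketch (illustrative only, not part of the generated
-    # module): acquiring an infinite lease, capturing x-ms-lease-id via
-    # `cls`, using it for a guarded metadata update, then releasing it.
-    # Constructing LeaseAccessConditions with a lease_id keyword is an
-    # assumption; the code above only reads that attribute.
-    #
-    #     hdrs = file_ops.acquire_lease(duration=-1,
-    #                                   cls=lambda resp, body, h: h)
-    #     lease = models.LeaseAccessConditions(lease_id=hdrs['x-ms-lease-id'])
-    #     file_ops.set_metadata(metadata='owner=leaseholder',
-    #                           lease_access_conditions=lease)
-    #     file_ops.release_lease(lease_id=hdrs['x-ms-lease-id'])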
-
-    def release_lease(self, lease_id, timeout=None, request_id=None, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "release"
-
-        # Construct URL
-        url = self.release_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param lease_id: Specifies the current lease ID on the resource.
-        :type lease_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param proposed_lease_id: Proposed lease ID, in a GUID string format.
-         The File service returns 400 (Invalid request) if the proposed lease
-         ID is not in the correct format. See Guid Constructor (String) for a
-         list of valid GUID string formats.
-        :type proposed_lease_id: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "lease"
-        action = "change"
-
-        # Construct URL
-        url = self.change_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-        if proposed_lease_id is not None:
-            header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
-        """[Update] The Lease File operation establishes and manages a lock on a
-        file for write and delete operations.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "lease"
-        action = "break"
-
-        # Construct URL
-        url = self.break_lease.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
-                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
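
The four lease verbs above (acquire, release, change, break) share one wire shape: a PUT with comp=lease and the verb carried in the x-ms-lease-action header, differing only in which x-ms-lease-* headers accompany it. A minimal sketch of that lifecycle at the REST level, assuming a SAS-authenticated file URL and using the third-party requests library (the URL, SAS token, and version string are placeholders, not values taken from this package):

    import uuid
    import requests

    # Hypothetical SAS-authenticated file URL; replace with a real one.
    FILE_URL = "https://account.file.core.windows.net/share/dir/file.txt?<sas>"
    VERSION = "2019-07-07"  # the fileshare API version being removed here

    def lease_action(action, **extra):
        """PUT ?comp=lease with the verb in x-ms-lease-action, mirroring the generated code."""
        headers = {"x-ms-version": VERSION, "x-ms-lease-action": action, **extra}
        resp = requests.put(FILE_URL, params={"comp": "lease"}, headers=headers)
        resp.raise_for_status()
        return resp.headers

    # Acquire: file leases are infinite-only, so the duration is -1.
    lease_id = lease_action("acquire", **{"x-ms-lease-duration": "-1"})["x-ms-lease-id"]

    # Change: supply the current ID plus a proposed GUID; the returned ID is the new one.
    changed = lease_action("change", **{
        "x-ms-lease-id": lease_id,
        "x-ms-proposed-lease-id": str(uuid.uuid4()),
    })
    lease_id = changed["x-ms-lease-id"]

    # Release identifies the lease the same way; break needs no ID at all.
    lease_action("release", **{"x-ms-lease-id": lease_id})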
-
-    def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Upload a range of bytes to a file.
-
-        :param range: Specifies the range of bytes to be written. Both the
-         start and end of the range must be specified. For an update operation,
-         the range can be up to 4 MB in size. For a clear operation, the range
-         can be up to the value of the file's full size. The File service
-         accepts only a single byte range for the Range and 'x-ms-range'
-         headers, and the byte range must be specified in the following format:
-         bytes=startByte-endByte.
-        :type range: str
-        :param file_range_write: Specify one of the following options:
-         - update: writes the bytes specified by the request body into the
-           specified range. The Range and Content-Length headers must match
-           to perform the update.
-         - clear: clears the specified range and releases the space used in
-           storage for that range. To clear a range, set the Content-Length
-           header to zero, and set the Range header to a value that indicates
-           the range to clear, up to the maximum file size.
-         Possible values include: 'update', 'clear'
-        :type file_range_write: str or
-         ~azure.storage.fileshare.models.FileRangeWriteType
-        :param content_length: Specifies the number of bytes being transmitted
-         in the request body. When the x-ms-write header is set to clear, the
-         value of this header must be set to zero.
-        :type content_length: long
-        :param optionalbody: The request body: the bytes to write for an
-         'update' operation; omit for a 'clear' operation.
-        :type optionalbody: Generator
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param content_md5: An MD5 hash of the content. This hash is used to
-         verify the integrity of the data during transport. When the
-         Content-MD5 header is specified, the File service compares the hash of
-         the content that has arrived with the header value that was sent. If
-         the two hashes do not match, the operation will fail with error code
-         400 (Bad Request).
-        :type content_md5: bytearray
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "range"
-
-        # Construct URL
-        url = self.upload_range.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/octet-stream'
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if content_md5 is not None:
-            header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct body
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
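
As the docstring above notes, x-ms-write drives two very different requests: an 'update' must carry a body whose length matches the range exactly, while a 'clear' must carry Content-Length: 0 and no body. A small sketch of the two header shapes (a header-building helper only, not a full request; the version string is a placeholder):

    def put_range_headers(start, end, mode, version="2019-07-07"):
        """Header set for Put Range: 'update' writes bytes, 'clear' frees them."""
        assert mode in ("update", "clear")
        # For update, Content-Length must equal the (inclusive) range size;
        # for clear it must be zero.
        length = (end - start + 1) if mode == "update" else 0
        return {
            "x-ms-version": version,
            "x-ms-write": mode,
            "x-ms-range": "bytes=%d-%d" % (start, end),  # both ends required
            "Content-Length": str(length),
        }

    update = put_range_headers(0, 4 * 1024 * 1024 - 1, "update")  # a full 4 MiB write
    clear = put_range_headers(0, 4 * 1024 * 1024 - 1, "clear")    # free the same span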
-
-    def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Upload a range of bytes to a file where the contents are read from a
-        URL.
-
-        :param range: Writes data to the specified byte range in the file.
-        :type range: str
-        :param copy_source: Specifies the URL of the source file or blob, up
-         to 2 KB in length. To copy a file to another file within the same
-         storage account, you may use Shared Key to authenticate the source
-         file. If you are copying a file from another storage account, or if
-         you are copying a blob from the same storage account or another
-         storage account, then you must authenticate the source file or blob
-         using a shared access signature. If the source is a public blob, no
-         authentication is required to perform the copy operation. A file in a
-         share snapshot can also be specified as a copy source.
-        :type copy_source: str
-        :param content_length: Specifies the number of bytes being transmitted
-         in the request body. When the x-ms-write header is set to clear, the
-         value of this header must be set to zero.
-        :type content_length: long
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param source_range: Bytes of source data in the specified range.
-        :type source_range: str
-        :param source_content_crc64: Specify the crc64 calculated for the
-         range of bytes that must be read from the copy source.
-        :type source_content_crc64: bytearray
-        :param source_modified_access_conditions: Additional parameters for
-         the operation
-        :type source_modified_access_conditions:
-         ~azure.storage.fileshare.models.SourceModifiedAccessConditions
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        source_if_match_crc64 = None
-        if source_modified_access_conditions is not None:
-            source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64
-        source_if_none_match_crc64 = None
-        if source_modified_access_conditions is not None:
-            source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "range"
-
-        # Construct URL
-        url = self.upload_range_from_url.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        if source_range is not None:
-            header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
-        header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str')
-        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
-        if source_content_crc64 is not None:
-            header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if source_if_match_crc64 is not None:
-            header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray')
-        if source_if_none_match_crc64 is not None:
-            header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
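
The two x-ms-source-if-*-crc64 headers are conditional guards: the service computes a CRC-64 over the source range it reads and performs the write only if that value matches (if-match) or differs from (if-none-match) the supplied one. A sketch of how the flattened SourceModifiedAccessConditions fields above become wire headers, assuming (as msrest's 'bytearray' serialization suggests) base64-encoded values:

    import base64

    def source_crc_conditions(if_match=None, if_none_match=None):
        """Flatten the two CRC-64 guards into headers, as the generated method does."""
        headers = {}
        if if_match is not None:
            headers["x-ms-source-if-match-crc64"] = base64.b64encode(if_match).decode()
        if if_none_match is not None:
            headers["x-ms-source-if-none-match-crc64"] = base64.b64encode(if_none_match).decode()
        return headers

    # e.g. only copy if the source range still has the CRC-64 recorded earlier:
    headers = source_crc_conditions(if_match=b"\x01\x02\x03\x04\x05\x06\x07\x08")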
-
-    def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Returns the list of valid ranges for a file.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param range: Specifies the range of bytes over which to list ranges,
-         inclusively.
-        :type range: str
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.fileshare.models.Range]
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "rangelist"
-
-        # Construct URL
-        url = self.get_range_list.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if range is not None:
-            header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[Range]', response)
-            header_dict = {
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
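
Since the operation returns the valid (non-cleared) ranges rather than a byte count, a common follow-up is summing them. A minimal sketch, assuming each returned item exposes inclusive start and end offsets as the Range model does:

    from collections import namedtuple

    def used_bytes(ranges):
        """Sum the sizes of the ranges returned by get_range_list.
        Both ends are inclusive, hence the +1 per range."""
        return sum(r.end - r.start + 1 for r in ranges)

    # Stand-in for deserialized Range models:
    R = namedtuple("R", "start end")
    assert used_bytes([R(0, 511), R(1024, 2047)]) == 512 + 1024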
-
-    def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Copies a blob or file to a destination file within the storage account.
-
-        :param copy_source: Specifies the URL of the source file or blob, up
-         to 2 KB in length. To copy a file to another file within the same
-         storage account, you may use Shared Key to authenticate the source
-         file. If you are copying a file from another storage account, or if
-         you are copying a blob from the same storage account or another
-         storage account, then you must authenticate the source file or blob
-         using a shared access signature. If the source is a public blob, no
-         authentication is required to perform the copy operation. A file in a
-         share snapshot can also be specified as a copy source.
-        :type copy_source: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param file_permission: If specified, the permission (security
-         descriptor) is set for the directory/file. This header can be used
-         if the permission size is 8 KB or less; otherwise the
-         x-ms-file-permission-key header must be used. Default value:
-         'inherit'. If SDDL is specified as input, it must have an owner,
-         group, and dacl. Note: only one of x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file. Note: Only one of the x-ms-file-permission or
-         x-ms-file-permission-key should be specified.
-        :type file_permission_key: str
-        :param copy_file_smb_info: Additional parameters for the operation
-        :type copy_file_smb_info:
-         ~azure.storage.fileshare.models.CopyFileSmbInfo
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        file_permission_copy_mode = None
-        if copy_file_smb_info is not None:
-            file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode
-        ignore_read_only = None
-        if copy_file_smb_info is not None:
-            ignore_read_only = copy_file_smb_info.ignore_read_only
-        file_attributes = None
-        if copy_file_smb_info is not None:
-            file_attributes = copy_file_smb_info.file_attributes
-        file_creation_time = None
-        if copy_file_smb_info is not None:
-            file_creation_time = copy_file_smb_info.file_creation_time
-        file_last_write_time = None
-        if copy_file_smb_info is not None:
-            file_last_write_time = copy_file_smb_info.file_last_write_time
-        set_archive_attribute = None
-        if copy_file_smb_info is not None:
-            set_archive_attribute = copy_file_smb_info.set_archive_attribute
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        # Construct URL
-        url = self.start_copy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
-        if file_permission is not None:
-            header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str')
-        if file_permission_key is not None:
-            header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        if file_permission_copy_mode is not None:
-            header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType')
-        if ignore_read_only is not None:
-            header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool')
-        if file_attributes is not None:
-            header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str')
-        if file_creation_time is not None:
-            header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str')
-        if file_last_write_time is not None:
-            header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str')
-        if set_archive_attribute is not None:
-            header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", set_archive_attribute, 'bool')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
-                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
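
start_copy only schedules the work: the 202 hands back x-ms-copy-id and an initial x-ms-copy-status, and the copy itself finishes asynchronously. A hedged polling sketch; get_status (standing in for a Get File Properties call) and abort (wrapping abort_copy below) are hypothetical callables:

    import time

    def wait_for_copy(get_status, abort, copy_id, timeout=300, interval=2.0):
        """Poll x-ms-copy-status until 'success'; abort a still-pending copy on timeout.

        get_status(copy_id) -> 'pending' | 'success' | 'aborted' | 'failed'
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            status = get_status(copy_id)
            if status == "success":
                return
            if status in ("aborted", "failed"):
                raise RuntimeError("copy %s ended as %r" % (copy_id, status))
            time.sleep(interval)
        abort(copy_id)  # leaves a zero-length destination file, per abort_copy below
        raise TimeoutError("copy %s still pending after %ss" % (copy_id, timeout))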
-
-    def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
-        """Aborts a pending Copy File operation, and leaves a destination file
-        with zero length and full metadata.
-
-        :param copy_id: The copy identifier provided in the x-ms-copy-id
-         header of the original Copy File operation.
-        :type copy_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param lease_access_conditions: Additional parameters for the
-         operation
-        :type lease_access_conditions:
-         ~azure.storage.fileshare.models.LeaseAccessConditions
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        lease_id = None
-        if lease_access_conditions is not None:
-            lease_id = lease_access_conditions.lease_id
-
-        comp = "copy"
-
-        # Construct URL
-        url = self.abort_copy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
-
-    def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs):
-        """Lists handles for file.
-
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListHandlesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "listhandles"
-
-        # Construct URL
-        url = self.list_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListHandlesResponse', response)
-            header_dict = {
-                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
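
Because each page is capped at 5,000 handles and continuation is signalled through the marker, callers are expected to loop. A minimal sketch, assuming the deserialized ListHandlesResponse exposes handle_list and next_marker attributes:

    def iter_handles(list_handles):
        """Drain every page of open handles; list_handles mirrors the method above."""
        marker = None
        while True:
            page = list_handles(marker=marker)
            for handle in page.handle_list or []:
                yield handle
            marker = page.next_marker
            if not marker:  # an empty NextMarker means the listing is complete
                break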
-
-    def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs):
-        """Closes all handles open for given file.
-
-        :param handle_id: Specifies the handle ID opened on the file or
-         directory to be closed. An asterisk ('*') is a wildcard that
-         specifies all handles.
-        :type handle_id: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "forceclosehandles"
-
-        # Construct URL
-        url = self.force_close_handles.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')),
-                'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')),
-                'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
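
Closing with the '*' wildcard is paged as well, but here the continuation token comes back in the x-ms-marker response header rather than in a body, so a loop has to capture the headers via the cls callback. A sketch grounded in the header names deserialized above:

    def close_all_handles(force_close_handles):
        """Force-close repeatedly until no marker is returned, tallying the
        per-page closed/failed counts the service reports."""
        closed = failed = 0
        marker = None
        while True:
            # cls receives (response, deserialized, headers); keep the headers.
            hdrs = force_close_handles("*", marker=marker,
                                       cls=lambda resp, body, headers: headers)
            closed += hdrs.get("x-ms-number-of-handles-closed") or 0
            failed += hdrs.get("x-ms-number-of-handles-failed") or 0
            marker = hdrs.get("x-ms-marker")
            if not marker:
                break
        return closed, failed
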
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_service_operations.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_service_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_service_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_service_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,253 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ServiceOperations(object):
-    """ServiceOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Resource type query parameter. Constant value: "service".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "service"
-
-    def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs):
-        """Sets properties for a storage account's File service endpoint,
-        including properties for Storage Analytics metrics and CORS
-        (Cross-Origin Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.fileshare.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
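
The StorageServiceProperties body this sends is a small XML document of analytics metrics and CORS rules. A construction sketch using the model names the public azure.storage.fileshare package exposes; treat the exact signatures as assumptions, since the generated module ships its own equivalents:

    # Assumed public model names; the generated models module has equivalents.
    from azure.storage.fileshare import CorsRule, Metrics, RetentionPolicy

    hourly = Metrics(version="1.0", enabled=True, include_apis=True,
                     retention_policy=RetentionPolicy(enabled=True, days=7))
    cors = CorsRule(allowed_origins=["https://example.com"],
                    allowed_methods=["GET", "PUT"],
                    max_age_in_seconds=3600)
    # service.set_properties(StorageServiceProperties(hour_metrics=hourly,
    #                                                 cors=[cors]), timeout=30)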
-
-    def get_properties(self, timeout=None, cls=None, **kwargs):
-        """Gets the properties of a storage account's File service, including
-        properties for Storage Analytics metrics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
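
As a usage sketch only (not part of the removed module): get_properties pairs with the set_properties operation above in the usual read-modify-write cycle. `service` is an assumed, already-configured instance of this generated client, and the attribute names are taken from its StorageServiceProperties model.

    # Hedged sketch: enable hourly metrics with a 7-day retention window.
    props = service.get_properties(timeout=30)            # StorageServiceProperties
    props.hour_metrics.enabled = True
    props.hour_metrics.retention_policy.enabled = True
    props.hour_metrics.retention_policy.days = 7
    service.set_properties(props)                         # None unless cls is passed
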
-
-    def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs):
-        """The List Shares Segment operation returns a list of the shares and
-        share snapshots under the specified account.
-
-        :param prefix: Filters the results to return only entries whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         to be returned with the next list operation. The operation returns a
-         marker value within the response body if the list returned was not
-         complete. The marker value may then be used in a subsequent call to
-         request the next set of list items. The marker value is opaque to the
-         client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of entries to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5,000, the server will return up to 5,000 items.
-        :type maxresults: int
-        :param include: Include this parameter to specify one or more datasets
-         to include in the response.
-        :type include: list[str or
-         ~azure.storage.fileshare.models.ListSharesIncludeType]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListSharesResponse or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ListSharesResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_shares_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListSharesResponse', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_shares_segment.metadata = {'url': '/'}
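
A minimal paging sketch for list_shares_segment, under the assumption that `service` is a configured instance of this client: rerun the call with the returned next_marker until the service hands back an empty marker (field names are those of the generated ListSharesResponse).

    marker = None
    while True:
        page = service.list_shares_segment(prefix='logs-', maxresults=100,
                                           marker=marker, include=['metadata'])
        for share in page.share_items:
            print(share.name)
        marker = page.next_marker
        if not marker:                    # empty NextMarker: listing is complete
            break
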
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_share_operations.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_share_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_share_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/operations/_share_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,751 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ShareOperations(object):
-    """ShareOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: The resource type of the request. Constant value: "share".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "share"
-
-    def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs):
-        """Creates a new share under the specified account. If the share with the
-        same name already exists, the operation fails.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param quota: Specifies the maximum size of the share, in gigabytes.
-        :type quota: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        if quota is not None:
-            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{shareName}'}
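
An illustrative call against the operation above; `share_ops` stands in for a ShareOperations instance bound to a share URL, and the lambda is the documented `cls` hook, used here only to surface the response headers.

    hdrs = share_ops.create(quota=100,    # maximum share size in GiB (minimum=1)
                            cls=lambda response, deserialized, headers: headers)
    print(hdrs['ETag'], hdrs['Last-Modified'])
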
-
-    def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs):
-        """Returns all user-defined metadata and system properties for the
-        specified share or share snapshot. The data returned does not include
-        the share's list of files.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')),
-                'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')),
-                'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')),
-                'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')),
-                'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{shareName}'}
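
Since this operation deserializes no body and reports everything through headers, the `cls` callback is the only way to read the provisioned-throughput values; a sketch with the same assumed `share_ops` instance:

    hdrs = share_ops.get_properties(cls=lambda response, deserialized, headers: headers)
    print(hdrs['x-ms-share-quota'],                 # GiB
          hdrs['x-ms-share-provisioned-iops'],
          hdrs['x-ms-share-provisioned-ingress-mbps'])
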
-
-    def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, cls=None, **kwargs):
-        """Operation marks the specified share or share snapshot for deletion. The
-        share or share snapshot and any files contained within it are later
-        deleted during garbage collection.
-
-        :param sharesnapshot: The snapshot parameter is an opaque DateTime
-         value that, when present, specifies the share snapshot to query.
-        :type sharesnapshot: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param delete_snapshots: Pass 'include' to delete the base share
-         together with all of its snapshots. Possible values include:
-         'include'
-        :type delete_snapshots: str or
-         ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if sharesnapshot is not None:
-            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if delete_snapshots is not None:
-            header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{shareName}'}
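
A sketch of the two deletion modes, same `share_ops` assumption (the snapshot token below is a placeholder):

    share_ops.delete(sharesnapshot='2019-01-01T00:00:00.0000000Z')  # one snapshot
    share_ops.delete(delete_snapshots='include')                    # share + all snapshots
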
-
-    def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs):
-        """Creates a read-only snapshot of a share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "snapshot"
-
-        # Construct URL
-        url = self.create_snapshot.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_snapshot.metadata = {'url': '/{shareName}'}
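
The snapshot token travels only in the x-ms-snapshot response header, so a `cls` hook is again needed to capture it (sketch, same assumptions):

    hdrs = share_ops.create_snapshot(cls=lambda response, deserialized, headers: headers)
    token = hdrs['x-ms-snapshot']                   # opaque DateTime value
    share_ops.get_properties(sharesnapshot=token,
                             cls=lambda response, deserialized, headers: headers)
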
-
-    def create_permission(self, share_permission, timeout=None, cls=None, **kwargs):
-        """Create a permission (a security descriptor).
-
-        :param share_permission: A permission (a security descriptor) at the
-         share level.
-        :type share_permission:
-         ~azure.storage.fileshare.models.SharePermission
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "filepermission"
-
-        # Construct URL
-        url = self.create_permission.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False)
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create_permission.metadata = {'url': '/{shareName}'}
-
-    def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs):
-        """Returns the permission (security descriptor) for a given key.
-
-        :param file_permission_key: Key of the permission to be set for the
-         directory/file.
-        :type file_permission_key: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: SharePermission or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.SharePermission
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "filepermission"
-
-        # Construct URL
-        url = self.get_permission.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('SharePermission', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_permission.metadata = {'url': '/{shareName}'}
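
A round-trip sketch for the two permission operations: store an SDDL security descriptor at share scope, keep the key the service mints, and resolve it back. SharePermission is the generated model this module already imports via `models`; the SDDL string is purely illustrative.

    perm = models.SharePermission(permission='O:SYG:SYD:(A;;FA;;;SY)')  # sample SDDL
    hdrs = share_ops.create_permission(
        perm, cls=lambda response, deserialized, headers: headers)
    key = hdrs['x-ms-file-permission-key']
    fetched = share_ops.get_permission(key)         # SharePermission instance
    assert fetched.permission == perm.permission
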
-
-    def set_quota(self, timeout=None, quota=None, cls=None, **kwargs):
-        """Sets quota for the specified share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param quota: Specifies the maximum size of the share, in gigabytes.
-        :type quota: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_quota.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if quota is not None:
-            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_quota.metadata = {'url': '/{shareName}'}
-
-    def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs):
-        """Sets one or more user-defined name-value pairs for the specified share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param metadata: A name-value pair to associate with a file storage
-         object.
-        :type metadata: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{shareName}'}
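
A sketch covering this setter and the preceding set_quota, same assumptions; both return None unless a `cls` hook is supplied.

    share_ops.set_quota(quota=200)                  # grow the share to 200 GiB
    share_ops.set_metadata(metadata='env=test')     # sent as the x-ms-meta header
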
-
-    def get_access_policy(self, timeout=None, cls=None, **kwargs):
-        """Returns information about stored access policies specified on the
-        share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{shareName}'}
-
-    def set_access_policy(self, share_acl=None, timeout=None, cls=None, **kwargs):
-        """Sets a stored access policy for use with shared access signatures.
-
-        :param share_acl: The ACL for the share.
-        :type share_acl:
-         list[~azure.storage.fileshare.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}}
-        if share_acl is not None:
-            body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{shareName}'}
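
An end-to-end sketch for stored access policies (covering get_access_policy above as well) using the generated SignedIdentifier and AccessPolicy models; in this API version their start/expiry/permission fields are plain strings, and the identifier name and dates here are placeholders.

    policy = models.AccessPolicy(start='2019-07-01T00:00:00Z',
                                 expiry='2019-08-01T00:00:00Z',
                                 permission='r')
    share_ops.set_access_policy(share_acl=[
        models.SignedIdentifier(id='read-only-policy', access_policy=policy)])
    for identifier in share_ops.get_access_policy():
        print(identifier.id, identifier.access_policy.permission)
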
-
-    def get_statistics(self, timeout=None, cls=None, **kwargs):
-        """Retrieves statistics related to the share.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
-         Timeouts for File Service Operations.</a>
-        :type timeout: int
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ShareStats or the result of cls(response)
-        :rtype: ~azure.storage.fileshare.models.ShareStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ShareStats', response)
-            header_dict = {
-                'ETag': self._deserialize('str', response.headers.get('ETag')),
-                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/{shareName}'}
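
A closing sketch for this operations class; ShareStats is assumed to expose share_usage_bytes, as in the generated models of this API version.

    stats = share_ops.get_statistics()
    print('approx. bytes used:', stats.share_usage_bytes)
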
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/version.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/version.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_generated/version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-VERSION = "2019-07-07"
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_lease.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_lease.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_lease.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_lease.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,170 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import uuid
-
-from typing import (  # pylint: disable=unused-import
-    Optional, Any, TypeVar, TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator import distributed_trace
-
-from ._shared.response_handlers import return_response_headers, process_storage_error
-from ._generated.models import StorageErrorException
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    ShareFileClient = TypeVar("ShareFileClient")
-
-
-class ShareLeaseClient(object):
-    """Creates a new ShareLeaseClient.
-
-    This client provides lease operations on a ShareFileClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the file to lease.
-    :type client: ~azure.storage.fileshare.ShareFileClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-    def __init__(
-            self, client, lease_id=None
-    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
-        # type: (ShareFileClient, Optional[str]) -> None
-        self.id = lease_id or str(uuid.uuid4())
-        self.last_modified = None
-        self.etag = None
-        if hasattr(client, 'file_name'):
-            self._client = client._client.file  # type: ignore # pylint: disable=protected-access
-        else:
-            raise TypeError("Lease must use ShareFileClient.")
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        self.release()
-
-    @distributed_trace
-    def acquire(self, **kwargs):
-        # type: (int, **Any) -> None
-        """Requests a new lease. This operation establishes and manages a lock on a
-        file for write and delete operations. If the file does not have an active lease,
-        the File service creates a lease on the file. If the file has an active lease,
-        you can only request a new lease using the active lease ID.
-
-
-        If the file does not have an active lease, the File service creates a
-        lease on the file and returns a new lease ID.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        try:
-            response = self._client.acquire_lease(
-                timeout=kwargs.pop('timeout', None),
-                duration=-1,
-                proposed_lease_id=self.id,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-        self.etag = response.get('etag')  # type: str
-
-    @distributed_trace
-    def release(self, **kwargs):
-        # type: (Any) -> None
-        """Releases the lease. The lease may be released if the lease ID specified on the request matches
-        that associated with the file. Releasing the lease allows another client to immediately acquire the lease
-        for the file as soon as the release is complete.
-
-
-        The lease may be released if the client lease id specified matches
-        that associated with the file. Releasing the lease allows another client
-        to immediately acquire the lease for the file as soon as the release is complete.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        try:
-            response = self._client.release_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace
-    def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
-        a new lease ID in x-ms-proposed-lease-id.
-
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The File service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        try:
-            response = self._client.change_lease(
-                lease_id=self.id,
-                proposed_lease_id=proposed_lease_id,
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace
-    def break_lease(self, **kwargs):
-        # type: (Optional[int], Any) -> int
-        """Force breaks the lease if the file has an active lease. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
-
-        Once a lease is broken, it cannot be changed. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        try:
-            response = self._client.break_lease(
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response.get('lease_time')  # type: ignore
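
A usage sketch exercising the four lease operations above; `file_client` is an assumed ShareFileClient for an existing file.

    import uuid

    lease = ShareLeaseClient(file_client)
    lease.acquire()                                 # duration=-1: infinite lease
    print(lease.id, lease.etag, lease.last_modified)
    lease.change(proposed_lease_id=str(uuid.uuid4()))
    lease.release()

    # Any authorized caller may break a lease without knowing its ID:
    seconds_left = ShareLeaseClient(file_client).break_lease()
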
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_models.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,905 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-
-from azure.core.paging import PageIterator
-from ._parser import _parse_datetime_from_str
-from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
-from ._shared.models import DictMixin, get_enum_value
-from ._generated.models import StorageErrorException
-from ._generated.models import Metrics as GeneratedMetrics
-from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
-from ._generated.models import CorsRule as GeneratedCorsRule
-from ._generated.models import AccessPolicy as GenAccessPolicy
-from ._generated.models import DirectoryItem
-
-
-def _wrap_item(item):
-    if isinstance(item, DirectoryItem):
-        return {'name': item.name, 'is_directory': True}
-    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
-
-
-class Metrics(GeneratedMetrics):
-    """A summary of request statistics grouped by API in hour or minute aggregates
-    for files.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :keyword str version: The version of Storage Analytics to configure.
-    :keyword bool enabled: Required. Indicates whether metrics are enabled for the
-        File service.
-    :keyword bool include_apis: Indicates whether metrics should generate summary
-        statistics for called API operations.
-    :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should
-        persist.
-    """
-
-    def __init__(self, **kwargs):
-        self.version = kwargs.get('version', u'1.0')
-        self.enabled = kwargs.get('enabled', False)
-        self.include_apis = kwargs.get('include_apis')
-        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            version=generated.version,
-            enabled=generated.enabled,
-            include_apis=generated.include_apis,
-            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
-        )
-
-
-class RetentionPolicy(GeneratedRetentionPolicy):
-    """The retention policy which determines how long the associated data should
-    persist.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param bool enabled: Required. Indicates whether a retention policy is enabled
-        for the storage service.
-    :param int days: Indicates the number of days that metrics or logging or
-        soft-deleted data should be retained. All data older than this value will
-        be deleted.
-    """
-
-    def __init__(self, enabled=False, days=None):
-        self.enabled = enabled
-        self.days = days
-        if self.enabled and (self.days is None):
-            raise ValueError("If policy is enabled, 'days' must be specified.")
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            enabled=generated.enabled,
-            days=generated.days,
-        )
-
-
-class CorsRule(GeneratedCorsRule):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param list(str) allowed_origins:
-        A list of origin domains that will be allowed via CORS, or "*" to allow
-        all domains. The list must contain at least one entry. Limited to 64
-        origin domains. Each allowed origin can have up to 256 characters.
-    :param list(str) allowed_methods:
-        A list of HTTP methods that are allowed to be executed by the origin.
-        The list must contain at least one entry. For Azure Storage,
-        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-    :keyword list(str) allowed_headers:
-        Defaults to an empty list. A list of headers allowed to be part of
-        the cross-origin request. Limited to 64 defined headers and 2 prefixed
-        headers. Each header can be up to 256 characters.
-    :keyword list(str) exposed_headers:
-        Defaults to an empty list. A list of response headers to expose to CORS
-        clients. Limited to 64 defined headers and two prefixed headers. Each
-        header can be up to 256 characters.
-    :keyword int max_age_in_seconds:
-        The number of seconds that the client/browser should cache a
-        preflight response.
-    """
-
-    def __init__(self, allowed_origins, allowed_methods, **kwargs):
-        self.allowed_origins = ','.join(allowed_origins)
-        self.allowed_methods = ','.join(allowed_methods)
-        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
-        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
-
-    @classmethod
-    def _from_generated(cls, generated):
-        return cls(
-            [generated.allowed_origins],
-            [generated.allowed_methods],
-            allowed_headers=[generated.allowed_headers],
-            exposed_headers=[generated.exposed_headers],
-            max_age_in_seconds=generated.max_age_in_seconds,
-        )
-
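-
-# Editor's sketch, not part of the original module: CorsRule joins its list
-# arguments into comma-separated strings, matching the service payload. The
-# origin and methods shown are hypothetical.
-def _example_cors_rule():
-    rule = CorsRule(['https://www.contoso.com'], ['GET', 'PUT'],
-                    max_age_in_seconds=3600)
-    assert rule.allowed_methods == 'GET,PUT'  # lists are comma-joined
-    return rule
-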
-
-class AccessPolicy(GenAccessPolicy):
-    """Access Policy class used by the set and get acl methods in each service.
-
-    A stored access policy can specify the start time, expiry time, and
-    permissions for the Shared Access Signatures with which it's associated.
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit
-    them from the URL for the Shared Access Signature. Doing so permits you to
-    modify the associated signature's behavior at any time, as well as to revoke
-    it. Or you can specify one or more of the access policy parameters within
-    the stored access policy, and the others on the URL. Finally, you can
-    specify all of the parameters on the URL. In this case, you can use the
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must
-    include all fields required to authenticate the signature. If any required
-    fields are missing, the request will fail. Likewise, if a field is specified
-    both in the Shared Access Signature URL and in the stored access policy, the
-    request will fail with status code 400 (Bad Request).
-
-    :param permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :type permission: str or ~azure.storage.fileshare.FileSasPermissions or
-        ~azure.storage.fileshare.ShareSasPermissions
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    """
-    def __init__(self, permission=None, expiry=None, start=None):
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
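-
-# Editor's sketch, not part of the original module: a stored access policy
-# granting read access for one hour. ShareSasPermissions is defined later in
-# this module and resolves at call time; the import is local to keep the
-# sketch self-contained.
-def _example_access_policy():
-    from datetime import datetime, timedelta
-    return AccessPolicy(permission=ShareSasPermissions(read=True),
-                        expiry=datetime.utcnow() + timedelta(hours=1))
-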
-
-class LeaseProperties(DictMixin):
-    """File Lease Properties.
-
-    :ivar str status:
-        The lease status of the file. Possible values: locked|unlocked
-    :ivar str state:
-        Lease state of the file. Possible values: available|leased|expired|breaking|broken
-    :ivar str duration:
-        When a file is leased, specifies whether the lease is of infinite or fixed duration.
-    """
-
-    def __init__(self, **kwargs):
-        self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
-        self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
-        self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
-
-    @classmethod
-    def _from_generated(cls, generated):
-        lease = cls()
-        lease.status = get_enum_value(generated.properties.lease_status)
-        lease.state = get_enum_value(generated.properties.lease_state)
-        lease.duration = get_enum_value(generated.properties.lease_duration)
-        return lease
-
-
-class ContentSettings(DictMixin):
-    """Used to store the content settings of a file.
-
-    :param str content_type:
-        The content type specified for the file. If no content type was
-        specified, the default content type is application/octet-stream.
-    :param str content_encoding:
-        If the content_encoding has previously been set
-        for the file, that value is stored.
-    :param str content_language:
-        If the content_language has previously been set
-        for the file, that value is stored.
-    :param str content_disposition:
-        content_disposition conveys additional information about how to
-        process the response payload, and also can be used to attach
-        additional metadata. If content_disposition has previously been set
-        for the file, that value is stored.
-    :param str cache_control:
-        If the cache_control has previously been set for
-        the file, that value is stored.
-    :param str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    """
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None, **kwargs):
-
-        self.content_type = content_type or kwargs.get('Content-Type')
-        self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
-        self.content_language = content_language or kwargs.get('Content-Language')
-        self.content_md5 = content_md5 or kwargs.get('Content-MD5')
-        self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
-        self.cache_control = cache_control or kwargs.get('Cache-Control')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        settings = cls()
-        settings.content_type = generated.properties.content_type or None
-        settings.content_encoding = generated.properties.content_encoding or None
-        settings.content_language = generated.properties.content_language or None
-        settings.content_md5 = generated.properties.content_md5 or None
-        settings.content_disposition = generated.properties.content_disposition or None
-        settings.cache_control = generated.properties.cache_control or None
-        return settings
-
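-
-# Editor's sketch, not part of the original module: typical ContentSettings
-# for an upload; whether a particular upload API accepts a `content_settings`
-# argument is an assumption here.
-def _example_content_settings():
-    return ContentSettings(content_type='application/json',
-                           cache_control='no-cache')
-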
-
-class ShareProperties(DictMixin):
-    """Share's properties class.
-
-    :ivar str name:
-        The name of the share.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        The allocated quota.
-    :ivar dict metadata: A dict with name_value pairs to associate with the
-        share as metadata.
-    :ivar str snapshot:
-        Snapshot of the share.
-    :ivar str next_allowed_quota_downgrade_time:
-        The next time the share quota can be lowered.
-    :ivar int provisioned_egress_mbps:
-        Provisioned egress in megabits/second. Only applicable to premium file shares.
-    :ivar int provisioned_ingress_mbps:
-        Provisioned ingress in megabits/second. Only applicable to premium file shares.
-    :ivar int provisioned_iops:
-        Provisioned input/output operations per second. Only applicable to premium file shares.
-    """
-
-    def __init__(self, **kwargs):
-        self.name = None
-        self.last_modified = kwargs.get('Last-Modified')
-        self.etag = kwargs.get('ETag')
-        self.quota = kwargs.get('x-ms-share-quota')
-        self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time')
-        self.metadata = kwargs.get('metadata')
-        self.snapshot = None
-        self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps')
-        self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps')
-        self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.last_modified = generated.properties.last_modified
-        props.etag = generated.properties.etag
-        props.quota = generated.properties.quota
-        props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time
-        props.metadata = generated.metadata
-        props.snapshot = generated.snapshot
-        props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps
-        props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps
-        props.provisioned_iops = generated.properties.provisioned_iops
-        return props
-
-
-class SharePropertiesPaged(PageIterator):
-    """An iterable of Share properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.fileshare.ShareProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only shares whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of share names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(SharePropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                prefix=self.prefix,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
-        return self._response.next_marker or None, self.current_page
-
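-# Editor's note, not part of the original module: azure.core's PageIterator
-# drives this class by calling _get_next_cb(continuation_token) to fetch a raw
-# page, then _extract_data_cb(...) to split it into (next_token, items);
-# iteration stops once the returned next_marker is falsy.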
-
-class Handle(DictMixin):
-    """A listed Azure Storage handle item.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :keyword str handle_id: Required. XSMB service handle ID
-    :keyword str path: Required. File or directory name including full path starting
-     from share root
-    :keyword str file_id: Required. FileId uniquely identifies the file or
-     directory.
-    :keyword str parent_id: ParentId uniquely identifies the parent directory of the
-     object.
-    :keyword str session_id: Required. SMB session ID in context of which the file
-     handle was opened
-    :keyword str client_ip: Required. Client IP that opened the handle
-    :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened
-     the handle has last been reconnected. (UTC)
-    :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC)
-    """
-
-    def __init__(self, **kwargs):
-        self.id = kwargs.get('handle_id')
-        self.path = kwargs.get('path')
-        self.file_id = kwargs.get('file_id')
-        self.parent_id = kwargs.get('parent_id')
-        self.session_id = kwargs.get('session_id')
-        self.client_ip = kwargs.get('client_ip')
-        self.open_time = kwargs.get('open_time')
-        self.last_reconnect_time = kwargs.get('last_reconnect_time')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        handle = cls()
-        handle.id = generated.handle_id
-        handle.path = generated.path
-        handle.file_id = generated.file_id
-        handle.parent_id = generated.parent_id
-        handle.session_id = generated.session_id
-        handle.client_ip = generated.client_ip
-        handle.open_time = generated.open_time
-        handle.last_reconnect_time = generated.last_reconnect_time
-        return handle
-
-
-class HandlesPaged(PageIterator):
-    """An iterable of Handles.
-
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.fileshare.Handle)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param int results_per_page: The maximum number of handles to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, results_per_page=None, continuation_token=None):
-        super(HandlesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
-        return self._response.next_marker or None, self.current_page
-
-
-class DirectoryProperties(DictMixin):
-    """Directory's properties class.
-
-    :ivar str name:
-        The name of the directory.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the directory was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool server_encrypted:
-        Whether encryption is enabled.
-    :ivar dict metadata: A dict with name_value pairs to associate with the
-        directory as metadata.
-    :ivar change_time: Change time for the directory.
-    :vartype change_time: str or ~datetime.datetime
-    :ivar creation_time: Creation time for the directory.
-    :vartype creation_time: str or ~datetime.datetime
-    :ivar last_write_time: Last write time for the directory.
-    :vartype last_write_time: str or ~datetime.datetime
-    :ivar file_attributes:
-        The file system attributes for files and directories.
-    :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-    :ivar permission_key: Key of the permission to be set for the
-        directory/file.
-    :vartype permission_key: str
-    :ivar file_id: Required. FileId uniquely identifies the file or
-     directory.
-    :vartype file_id: str
-    :ivar parent_id: ParentId uniquely identifies the parent directory of the
-     object.
-    :vartype parent_id: str
-    """
-
-    def __init__(self, **kwargs):
-        self.name = None
-        self.last_modified = kwargs.get('Last-Modified')
-        self.etag = kwargs.get('ETag')
-        self.server_encrypted = kwargs.get('x-ms-server-encrypted')
-        self.metadata = kwargs.get('metadata')
-        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
-        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
-        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
-        self.file_attributes = kwargs.get('x-ms-file-attributes')
-        self.permission_key = kwargs.get('x-ms-file-permission-key')
-        self.file_id = kwargs.get('x-ms-file-id')
-        self.parent_id = kwargs.get('x-ms-file-parent-id')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.last_modified = generated.properties.last_modified
-        props.etag = generated.properties.etag
-        props.server_encrypted = generated.properties.server_encrypted
-        props.metadata = generated.metadata
-        return props
-
-
-class DirectoryPropertiesPaged(PageIterator):
-    """An iterable for the contents of a directory.
-
-    This iterable will yield dicts for the contents of the directory. The dicts
-    will have the keys 'name' (str) and 'is_directory' (bool).
-    Items that are files (is_directory=False) will have an additional 'size' key.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(dict(str, Any))
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only entries whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of entries to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(DirectoryPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                marker=continuation_token or None,
-                prefix=self.prefix,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items]
-        self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items])
-        return self._response.next_marker or None, self.current_page
-
-
-class FileProperties(DictMixin):
-    """File's properties class.
-
-    :ivar str name:
-        The name of the file.
-    :ivar str path:
-        The path of the file.
-    :ivar str share:
-        The name of the share.
-    :ivar str snapshot:
-        File snapshot.
-    :ivar int content_length:
-        Size of file in bytes.
-    :ivar dict metadata: A dict with name_value pairs to associate with the
-        file as metadata.
-    :ivar str file_type:
-        Type of the file.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int size:
-        Size of file in bytes.
-    :ivar str content_range:
-        The range of bytes.
-    :ivar bool server_encrypted:
-        Whether encryption is enabled.
-    :ivar copy:
-        The copy properties.
-    :vartype copy: ~azure.storage.fileshare.CopyProperties
-    :ivar content_settings:
-        The content settings for the file.
-    :vartype content_settings: ~azure.storage.fileshare.ContentSettings
-    """
-
-    def __init__(self, **kwargs):
-        self.name = kwargs.get('name')
-        self.path = None
-        self.share = None
-        self.snapshot = None
-        self.content_length = kwargs.get('Content-Length')
-        self.metadata = kwargs.get('metadata')
-        self.file_type = kwargs.get('x-ms-type')
-        self.last_modified = kwargs.get('Last-Modified')
-        self.etag = kwargs.get('ETag')
-        self.size = kwargs.get('Content-Length')
-        self.content_range = kwargs.get('Content-Range')
-        self.server_encrypted = kwargs.get('x-ms-server-encrypted')
-        self.copy = CopyProperties(**kwargs)
-        self.content_settings = ContentSettings(**kwargs)
-        self.lease = LeaseProperties(**kwargs)
-        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
-        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
-        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
-        self.file_attributes = kwargs.get('x-ms-file-attributes')
-        self.permission_key = kwargs.get('x-ms-file-permission-key')
-        self.file_id = kwargs.get('x-ms-file-id')
-        self.parent_id = kwargs.get('x-ms-file-parent-id')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.content_length = generated.properties.content_length
-        props.metadata = generated.properties.metadata
-        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
-        return props
-
-
-class CopyProperties(DictMixin):
-    """File Copy Properties.
-
-    :ivar str id:
-        String identifier for the last attempted Copy File operation where this file
-        was the destination file. This header does not appear if this file has never
-        been the destination in a Copy File operation, or if this file has been
-        modified after a concluded Copy File operation.
-    :ivar str source:
-        URL up to 2 KB in length that specifies the source file used in the last attempted
-        Copy File operation where this file was the destination file. This header does not
-        appear if this file has never been the destination in a Copy File operation, or if
-        this file has been modified after a concluded Copy File operation.
-    :ivar str status:
-        State of the copy operation identified by Copy ID, with these values:
-            success:
-                Copy completed successfully.
-            pending:
-                Copy is in progress. Check copy_status_description if intermittent,
-                non-fatal errors impede copy progress but don't cause failure.
-            aborted:
-                Copy was ended by Abort Copy File.
-            failed:
-                Copy failed. See copy_status_description for failure details.
-    :ivar str progress:
-        Contains the number of bytes copied and the total bytes in the source in the last
-        attempted Copy File operation where this file was the destination file. Can show
-        between 0 and Content-Length bytes copied.
-    :ivar datetime completion_time:
-        Conclusion time of the last attempted Copy File operation where this file was the
-        destination file. This value can specify the time of a completed, aborted, or
-        failed copy attempt.
-    :ivar str status_description:
-        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
-        or non-fatal copy operation failure.
-    :ivar bool incremental_copy:
-        Copies the snapshot of the source file to a destination file.
-        The snapshot is copied such that only the differential changes since
-        the previously copied snapshot are transferred to the destination.
-    :ivar datetime destination_snapshot:
-        Included if the file is incremental copy or incremental copy snapshot,
-        if x-ms-copy-status is success. Snapshot time of the last successful
-        incremental copy snapshot for this file.
-    """
-
-    def __init__(self, **kwargs):
-        self.id = kwargs.get('x-ms-copy-id')
-        self.source = kwargs.get('x-ms-copy-source')
-        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
-        self.progress = kwargs.get('x-ms-copy-progress')
-        self.completion_time = kwargs.get('x-ms-copy-completion-time')
-        self.status_description = kwargs.get('x-ms-copy-status-description')
-        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
-        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        copy = cls()
-        copy.id = generated.properties.copy_id or None
-        copy.status = get_enum_value(generated.properties.copy_status) or None
-        copy.source = generated.properties.copy_source or None
-        copy.progress = generated.properties.copy_progress or None
-        copy.completion_time = generated.properties.copy_completion_time or None
-        copy.status_description = generated.properties.copy_status_description or None
-        copy.incremental_copy = generated.properties.incremental_copy or None
-        copy.destination_snapshot = generated.properties.destination_snapshot or None
-        return copy
-
-
-class FileSasPermissions(object):
-    """FileSasPermissions class to be used with
-    generating shared access signature operations.
-
-    :param bool read:
-        Read the content, properties, metadata. Use the file as the source of a copy
-        operation.
-    :param bool create:
-        Create a new file or copy a file to a new file.
-    :param bool write:
-        Create or write content, properties, metadata. Resize the file. Use the file
-        as the destination of a copy operation within the same account.
-    :param bool delete:
-        Delete the file.
-    """
-    def __init__(self, read=False, create=False, write=False, delete=False):
-        self.read = read
-        self.create = create
-        self.write = write
-        self.delete = delete
-        self._str = (('r' if self.read else '') +
-                     ('c' if self.create else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a FileSasPermissions from a string.
-
-        To specify read, create, write, or delete permissions you need only to
-        include the first letter of the word in the string. E.g. For read and
-        create permissions, you would provide a string "rc".
-
-        :param str permission: The string which dictates the read, create,
-            write, or delete permissions
-        :return: A FileSasPermissions object
-        :rtype: ~azure.storage.fileshare.FileSasPermissions
-        """
-        p_read = 'r' in permission
-        p_create = 'c' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-
-        parsed = cls(p_read, p_create, p_write, p_delete)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
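-
-# Editor's sketch, not part of the original module: from_string round-trips
-# with str(), so 'rc' parses to read+create and renders back unchanged.
-def _example_file_sas_permissions():
-    perms = FileSasPermissions.from_string('rc')
-    assert perms.read and perms.create and not perms.write
-    assert str(perms) == 'rc'
-    return perms
-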
-
-class ShareSasPermissions(object):
-    """ShareSasPermissions class to be used to be used with
-    generating shared access signature and access policy operations.
-
-    :param bool read:
-        Read the content, properties or metadata of any file in the share. Use any
-        file in the share as the source of a copy operation.
-    :param bool write:
-        For any file in the share, create or write content, properties or metadata.
-        Resize the file. Use the file as the destination of a copy operation within
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or
-        metadata with a service SAS. Use an account SAS instead.
-    :param bool delete:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use
-        an account SAS instead.
-    :param bool list:
-        List files and directories in the share.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False):  # pylint: disable=redefined-builtin
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a ShareSasPermissions from a string.
-
-        To specify read, write, delete, or list permissions you need only to
-        include the first letter of the word in the string. E.g. For read and
-        write permissions, you would provide a string "rw".
-
-        :param str permission: The string which dictates the read, write,
-            delete, or list permissions
-        :return: A ShareSasPermissions object
-        :rtype: ~azure.storage.fileshare.ShareSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-
-class NTFSAttributes(object):
-    """
-    Valid set of attributes to set for a file or directory.
-    When setting attributes on a directory, 'Directory' should always be enabled, except when 'None' is used to clear all attributes.
-
-    :ivar bool read_only:
-        Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
-    :ivar bool hidden:
-        Enable/disable 'Hidden' attribute for DIRECTORY or FILE
-    :ivar bool system:
-        Enable/disable 'System' attribute for DIRECTORY or FILE
-    :ivar bool none:
-        Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
-    :ivar bool directory:
-        Enable/disable 'Directory' attribute for DIRECTORY
-    :ivar bool archive:
-        Enable/disable 'Archive' attribute for DIRECTORY or FILE
-    :ivar bool temporary:
-        Enable/disable 'Temporary' attribute for FILE
-    :ivar bool offline:
-        Enable/disable 'Offline' attribute for DIRECTORY or FILE
-    :ivar bool not_content_indexed:
-        Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
-    :ivar bool no_scrub_data:
-        Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
-    """
-    def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
-                 temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
-
-        self.read_only = read_only
-        self.hidden = hidden
-        self.system = system
-        self.none = none
-        self.directory = directory
-        self.archive = archive
-        self.temporary = temporary
-        self.offline = offline
-        self.not_content_indexed = not_content_indexed
-        self.no_scrub_data = no_scrub_data
-        self._str = (('ReadOnly|' if self.read_only else '') +
-                     ('Hidden|' if self.hidden else '') +
-                     ('System|' if self.system else '') +
-                     ('None|' if self.none else '') +
-                     ('Directory|' if self.directory else '') +
-                     ('Archive|' if self.archive else '') +
-                     ('Temporary|' if self.temporary else '') +
-                     ('Offline|' if self.offline else '') +
-                     ('NotContentIndexed|' if self.not_content_indexed else '') +
-                     ('NoScrubData|' if self.no_scrub_data else ''))
-
-    def __str__(self):
-        concatenated_params = self._str
-        return concatenated_params.strip('|')
-
-    @classmethod
-    def from_string(cls, string):
-        """Create a NTFSAttributes from a string.
-
-        To specify permissions you can pass in a string with the
-        desired permissions, e.g. "ReadOnly|Hidden|System"
-
-        :param str string: The string which dictates the permissions.
-        :return: A NTFSAttributes object
-        :rtype: ~azure.storage.fileshare.NTFSAttributes
-        """
-        read_only = "ReadOnly" in string
-        hidden = "Hidden" in string
-        system = "System" in string
-        none = "None" in string
-        directory = "Directory" in string
-        archive = "Archive" in string
-        temporary = "Temporary" in string
-        offline = "Offline" in string
-        not_content_indexed = "NotContentIndexed" in string
-        no_scrub_data = "NoScrubData" in string
-
-        parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
-                     no_scrub_data)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
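-
-# Editor's sketch, not part of the original module: attributes render as a
-# pipe-joined string and parse back via from_string.
-def _example_ntfs_attributes():
-    attrs = NTFSAttributes(read_only=True, archive=True)
-    assert str(attrs) == 'ReadOnly|Archive'
-    return NTFSAttributes.from_string('ReadOnly|Archive')
-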
-
-def service_properties_deserialize(generated):
-    """Deserialize a ServiceProperties objects into a dict.
-    """
-    return {
-        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
-        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
-        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
-    }
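-
-# Editor's note, not part of the original module: the resulting dict has the
-# shape {'hour_metrics': Metrics, 'minute_metrics': Metrics,
-# 'cors': [CorsRule, ...]}, mirroring the generated ServiceProperties model.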
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_parser.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_parser.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_parser.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_parser.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,42 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from datetime import datetime, timedelta
-
-_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time'
-_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else ' \
-                            'please use file_permission_key'
-
-
-def _get_file_permission(file_permission, file_permission_key, default_permission):
-    # if file_permission and file_permission_key are both empty, then use the default_permission
-    # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used
-    if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024:
-        raise ValueError(_FILE_PERMISSION_TOO_LONG)
-
-    if not file_permission:
-        if not file_permission_key:
-            return default_permission
-        return None
-
-    if not file_permission_key:
-        return file_permission
-
-    raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS)
-
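-# Editor's note, not part of the original module: the resolution rules above
-# in table form:
-#     neither set           -> default_permission
-#     only file_permission  -> file_permission (must be <= 8KB)
-#     only permission_key   -> None (the key is sent separately)
-#     both set              -> ValueError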
-
-def _parse_datetime_from_str(string_datetime):
-    if not string_datetime:
-        return None
-    dt, _, us = string_datetime.partition(".")
-    dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
-    us = int(us[:-2])  # drop the 100-ns digit and trailing 'Z'; the rest is microseconds
-    datetime_obj = dt + timedelta(microseconds=us)
-    return datetime_obj
-
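-
-# Editor's sketch, not part of the original module: round-tripping the
-# service's 7-digit fractional-second timestamps with the helpers above and
-# below. `_example_parser_round_trip` is a hypothetical helper.
-def _example_parser_round_trip():
-    parsed = _parse_datetime_from_str('2019-07-07T12:30:15.1234567Z')
-    assert parsed == datetime(2019, 7, 7, 12, 30, 15, 123456)
-    assert _datetime_to_str(parsed) == '2019-07-07T12:30:15.1234560Z'
-    return parsed
-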
-
-def _datetime_to_str(datetime_obj):
-    # append a trailing zero (the service uses 7-digit fractional seconds) and a UTC 'Z'
-    return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z'
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_serialize.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_serialize.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_serialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_serialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,110 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from azure.core import MatchConditions
-
-from ._parser import _datetime_to_str, _get_file_permission
-from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo
-
-
-_SUPPORTED_API_VERSIONS = [
-    '2019-02-02',
-    '2019-07-07'
-]
-
-
-def _get_match_headers(kwargs, match_param, etag_param):
-    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
-    # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it.
-    if_match = None
-    if_none_match = None
-    match_condition = kwargs.pop(match_param, None)
-    if match_condition == MatchConditions.IfNotModified:
-        if_match = kwargs.pop(etag_param, None)
-        if not if_match:
-            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
-    elif match_condition == MatchConditions.IfPresent:
-        if_match = '*'
-    elif match_condition == MatchConditions.IfModified:
-        if_none_match = kwargs.pop(etag_param, None)
-        if not if_none_match:
-            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
-    elif match_condition == MatchConditions.IfMissing:
-        if_none_match = '*'
-    elif match_condition is None:
-        if etag_param in kwargs:
-            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
-    else:
-        raise TypeError("Invalid match condition: {}".format(match_condition))
-    return if_match, if_none_match
-
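-# Editor's note, not part of the original module: MatchConditions map onto
-# HTTP precondition headers as follows:
-#     IfNotModified -> If-Match: <etag>        IfPresent -> If-Match: *
-#     IfModified    -> If-None-Match: <etag>   IfMissing -> If-None-Match: *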
-
-def get_source_conditions(kwargs):
-    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
-    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
-    return SourceModifiedAccessConditions(
-        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
-        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
-        source_if_match=if_match or kwargs.pop('source_if_match', None),
-        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
-    )
-
-def get_access_conditions(lease):
-    # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
-    try:
-        lease_id = lease.id # type: ignore
-    except AttributeError:
-        lease_id = lease # type: ignore
-    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
-
-
-def get_smb_properties(kwargs):
-    # type: (Dict[str, Any]) -> Dict[str, Any]
-    ignore_read_only = kwargs.pop('ignore_read_only', None)
-    set_archive_attribute = kwargs.pop('set_archive_attribute', None)
-    file_permission = kwargs.pop('file_permission', None)
-    file_permission_key = kwargs.pop('permission_key', None)
-    file_attributes = kwargs.pop('file_attributes', None)
-    file_creation_time = kwargs.pop('file_creation_time', None) or ""
-    file_last_write_time = kwargs.pop('file_last_write_time', None) or ""
-
-    file_permission_copy_mode = None
-    file_permission = _get_file_permission(file_permission, file_permission_key, None)
-
-    if file_permission:
-        if file_permission.lower() == "source":
-            file_permission = None
-            file_permission_copy_mode = "source"
-        else:
-            file_permission_copy_mode = "override"
-    elif file_permission_key:
-        if file_permission_key.lower() == "source":
-            file_permission_key = None
-            file_permission_copy_mode = "source"
-        else:
-            file_permission_copy_mode = "override"
-    return {
-        'file_permission': file_permission,
-        'file_permission_key': file_permission_key,
-        'copy_file_smb_info': CopyFileSmbInfo(
-            file_permission_copy_mode=file_permission_copy_mode,
-            ignore_read_only=ignore_read_only,
-            file_attributes=file_attributes,
-            file_creation_time=_datetime_to_str(file_creation_time),
-            file_last_write_time=_datetime_to_str(file_last_write_time),
-            set_archive_attribute=set_archive_attribute
-        )
-
-    }
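-
-# Editor's note, not part of the original module: the literal value "source"
-# in file_permission/permission_key selects copy-from-source mode; any other
-# non-empty value switches file_permission_copy_mode to "override".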
-
-def get_api_version(kwargs, default):
-    # type: (Dict[str, Any], str) -> str
-    api_version = kwargs.pop('api_version', None)
-    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
-        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
-        raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions))
-    return api_version or default
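-
-# Editor's note, not part of the original module: a recognised api_version
-# (e.g. '2019-02-02') is selected as-is; an unrecognised value raises
-# ValueError listing _SUPPORTED_API_VERSIONS; omitting it falls back to the
-# supplied default.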
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_client.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_client.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,686 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Optional, Union, Dict, Any, Iterable, TYPE_CHECKING
-)
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import Pipeline
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.request_handlers import add_metadata_headers, serialize_iso
-from ._shared.response_handlers import (
-    return_response_headers,
-    process_storage_error,
-    return_headers_and_deserialized)
-from ._generated import AzureFileStorage
-from ._generated.version import VERSION
-from ._generated.models import (
-    StorageErrorException,
-    SignedIdentifier,
-    DeleteSnapshotsOptionType,
-    SharePermission)
-from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission
-from ._serialize import get_api_version
-from ._directory_client import ShareDirectoryClient
-from ._file_client import ShareFileClient
-
-if TYPE_CHECKING:
-    from ._models import ShareProperties, AccessPolicy
-
-
-class ShareClient(StorageAccountHostsMixin):
-    """A client to interact with a specific share, although that share may not yet exist.
-
-    For operations relating to a specific directory or file in this share, the clients for
-    those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the share,
-        use the :func:`from_share_url` classmethod.
-    :param share_name:
-        The name of the share with which to interact.
-    :type share_name: str
-    :param str snapshot:
-        An optional share snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-    def __init__( # type: ignore
-            self, account_url,  # type: str
-            share_name,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not share_name:
-            raise ValueError("Please specify a share name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-        if hasattr(credential, 'get_token'):
-            raise ValueError("Token credentials not supported by the File service.")
-
-        path_snapshot, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError(
-                'You need to provide either an account shared key or SAS token when creating a storage service.')
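-        # `snapshot` may be a ShareProperties-like object, the dict returned
-        # by create_snapshot, or a raw snapshot ID string; resolve in that order.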
-        try:
-            self.snapshot = snapshot.snapshot # type: ignore
-        except AttributeError:
-            try:
-                self.snapshot = snapshot['snapshot'] # type: ignore
-            except TypeError:
-                self.snapshot = snapshot or path_snapshot
-
-        self.share_name = share_name
-        self._query_str, credential = self._format_query_string(
-            sas_token, credential, share_snapshot=self.snapshot)
-        super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    @classmethod
-    def from_share_url(cls, share_url,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareClient
-        """
-        :param str share_url: The full URI to the share.
-        :param str snapshot:
-            An optional share snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A share client.
-        :rtype: ~azure.storage.fileshare.ShareClient
-        """
-        try:
-            if not share_url.lower().startswith('http'):
-                share_url = "https://" + share_url
-        except AttributeError:
-            raise ValueError("Share URL must be a string.")
-        parsed_url = urlparse(share_url.rstrip('/'))
-        if not (parsed_url.path and parsed_url.netloc):
-            raise ValueError("Invalid URL: {}".format(share_url))
-        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
-        path_snapshot, _ = parse_query(parsed_url.query)
-        share_name = unquote(parsed_url.path.lstrip('/'))
-        snapshot = snapshot or (unquote(path_snapshot) if path_snapshot else None)
-
-        return cls(account_url, share_name, snapshot, credential, **kwargs)
-
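-    # Editor's sketch, not part of the original module: creating a client from
-    # a full share URL; the account, share name and SAS token below are
-    # hypothetical.
-    #     client = ShareClient.from_share_url(
-    #         "https://myaccount.file.core.windows.net/myshare?sv=2019-07-07&sig=XXX")
-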
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        share_name = self.share_name
-        if isinstance(share_name, six.text_type):
-            share_name = share_name.encode('UTF-8')
-        return "{}://{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(share_name),
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            share_name, # type: str
-            snapshot=None,  # type: Optional[str]
-            credential=None, # type: Optional[Any]
-            **kwargs # type: Any
-        ):
-        # type: (...) -> ShareClient
-        """Create ShareClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param share_name: The name of the share.
-        :type share_name: str
-        :param str snapshot:
-            The optional share snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A share client.
-        :rtype: ~azure.storage.fileshare.ShareClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START create_share_client_from_conn_string]
-                :end-before: [END create_share_client_from_conn_string]
-                :language: python
-                :dedent: 8
-                :caption: Gets the share client from connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs)
-
-    def get_directory_client(self, directory_path=None):
-        # type: (Optional[str]) -> ShareDirectoryClient
-        """Get a client to interact with the specified directory.
-        The directory need not already exist.
-
-        :param str directory_path:
-            Path to the specified directory.
-        :returns: A Directory Client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-        """
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return ShareDirectoryClient(
-            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
-            _location_mode=self._location_mode)
-
-    def get_file_client(self, file_path):
-        # type: (str) -> ShareFileClient
-        """Get a client to interact with the specified file.
-        The file need not already exist.
-
-        :param str file_path:
-            Path to the specified file.
-        :returns: A File Client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return ShareFileClient(
-            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version,
-            _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode)
-
-    @distributed_trace
-    def create_share(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Creates a new Share under the account. If a share with the
-        same name already exists, the operation fails.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the share as metadata.
-        :keyword int quota:
-            The quota to be allotted, in gigabytes.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START create_share]
-                :end-before: [END create_share]
-                :language: python
-                :dedent: 8
-                :caption: Creates a file share.
-        """
-        metadata = kwargs.pop('metadata', None)
-        quota = kwargs.pop('quota', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-
-        try:
-            return self._client.share.create( # type: ignore
-                timeout=timeout,
-                metadata=metadata,
-                quota=quota,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
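A sketch of the call above, reusing the `share` client assumed in the earlier
from_connection_string example; the metadata and quota values are placeholders,
and the returned dict holds the normalized response headers:

    props = share.create_share(metadata={"category": "test"}, quota=1)
    print(props["etag"], props["last_modified"])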
-    @distributed_trace
-    def create_snapshot( # type: ignore
-            self,
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a snapshot of the share.
-
-        A snapshot is a read-only version of a share that's taken at a point in time.
-        It can be read, copied, or deleted, but not modified. Snapshots provide a way
-        to back up a share as it appears at a moment in time.
-
-        A snapshot of a share has the same name as the base share from which the snapshot
-        is taken, with a DateTime value appended to indicate the time at which the
-        snapshot was taken.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the share as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START create_share_snapshot]
-                :end-before: [END create_share_snapshot]
-                :language: python
-                :dedent: 12
-                :caption: Creates a snapshot of the file share.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return self._client.share.create_snapshot( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
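A sketch of snapshot creation with the same assumed `share` client; the
snapshot ID in the returned dict can later be passed as the `snapshot`
argument when constructing a ShareClient:

    snapshot_props = share.create_snapshot(metadata={"reason": "backup"})
    snapshot_id = snapshot_props["snapshot"]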
-    @distributed_trace
-    def delete_share(
-            self, delete_snapshots=False, # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified share for deletion. The share is
-        later deleted during garbage collection.
-
-        :param bool delete_snapshots:
-            Indicates if snapshots are to be deleted.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START delete_share]
-                :end-before: [END delete_share]
-                :language: python
-                :dedent: 12
-                :caption: Deletes the share and any snapshots.
-        """
-        timeout = kwargs.pop('timeout', None)
-        delete_include = None
-        if delete_snapshots:
-            delete_include = DeleteSnapshotsOptionType.include
-        try:
-            self._client.share.delete(
-                timeout=timeout,
-                sharesnapshot=self.snapshot,
-                delete_snapshots=delete_include,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_share_properties(self, **kwargs):
-        # type: (Any) -> ShareProperties
-        """Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The share properties.
-        :rtype: ~azure.storage.fileshare.ShareProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_hello_world.py
-                :start-after: [START get_share_properties]
-                :end-before: [END get_share_properties]
-                :language: python
-                :dedent: 12
-                :caption: Gets the share properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            props = self._client.share.get_properties(
-                timeout=timeout,
-                sharesnapshot=self.snapshot,
-                cls=deserialize_share_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        props.name = self.share_name
-        props.snapshot = self.snapshot
-        return props # type: ignore
-
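A sketch reading back the properties fetched above (same assumed client):

    props = share.get_share_properties()
    print(props.name, props.quota, props.last_modified)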
-    @distributed_trace
-    def set_share_quota(self, quota, **kwargs):
-        # type: (int, Any) ->  Dict[str, Any]
-        """Sets the quota for the share.
-
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes.
-            Must be greater than 0, and less than or equal to 5 TB.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START set_share_quota]
-                :end-before: [END set_share_quota]
-                :language: python
-                :dedent: 12
-                :caption: Sets the share quota.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return self._client.share.set_quota( # type: ignore
-                timeout=timeout,
-                quota=quota,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def set_share_metadata(self, metadata, **kwargs):
-        # type: (Dict[str, Any], Any) ->  Dict[str, Any]
-        """Sets the metadata for the share.
-
-        Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the share as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START set_share_metadata]
-                :end-before: [END set_share_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Sets the share metadata.
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        try:
-            return self._client.share.set_metadata( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_share_access_policy(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the permissions for the share. The permissions
-        indicate whether files in a share may be accessed publicly.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Access policy information in a dict.
-        :rtype: dict[str, Any]
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response, identifiers = self._client.share.get_access_policy(
-                timeout=timeout,
-                cls=return_headers_and_deserialized,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {
-            'public_access': response.get('share_public_access'),
-            'signed_identifiers': identifiers or []
-        }
-
-    @distributed_trace
-    def set_share_access_policy(self, signed_identifiers, **kwargs):
-        # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str]
-        """Sets the permissions for the share, or stored access
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether files in a share may be accessed publicly.
-
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The
-            dictionary may contain up to 5 elements. An empty dictionary
-            will clear the access policies set on the share.
-        :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        if len(signed_identifiers) > 5:
-            raise ValueError(
-                'Too many access policies provided. The server does not support setting '
-                'more than 5 access policies on a single resource.')
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value))
-        signed_identifiers = identifiers # type: ignore
-        try:
-            return self._client.share.set_access_policy( # type: ignore
-                share_acl=signed_identifiers or None,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
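A sketch of setting a stored access policy; the identifier name and times are
placeholders, and AccessPolicy/ShareSasPermissions are assumed from the same
package namespace:

    from datetime import datetime, timedelta
    from azure.storage.fileshare import AccessPolicy, ShareSasPermissions

    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True),
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    share.set_share_access_policy(signed_identifiers={"read-only-1": policy})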
-    @distributed_trace
-    def get_share_stats(self, **kwargs):
-        # type: (Any) -> int
-        """Gets the approximate size of the data stored on the share in bytes.
-
-        Note that this value may not include all recently created
-        or recently re-sized files.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The approximate size of the data (in bytes) stored on the share.
-        :rtype: int
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = self._client.share.get_statistics(
-                timeout=timeout,
-                **kwargs)
-            return stats.share_usage_bytes # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_directories_and_files(
-            self, directory_name=None,  # type: Optional[str]
-            name_starts_with=None,  # type: Optional[str]
-            marker=None,  # type: Optional[str]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> Iterable[Dict[str,str]]
-        """Lists the directories and files under the share.
-
-        :param str directory_name:
-            Name of a directory.
-        :param str name_starts_with:
-            Filters the results to return only directories and files whose
-            names begin with the specified prefix.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object. If specified,
-            this generator will begin returning results from this point.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share.py
-                :start-after: [START share_list_files_in_dir]
-                :end-before: [END share_list_files_in_dir]
-                :language: python
-                :dedent: 12
-                :caption: List directories and files in the share.
-        """
-        timeout = kwargs.pop('timeout', None)
-        directory = self.get_directory_client(directory_name)
-        kwargs.setdefault('merge_span', True)
-        return directory.list_directories_and_files(
-            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
-
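An iteration sketch for the listing above (directory name is a placeholder;
each entry is a dict-like DirectoryProperties or FileProperties):

    for entry in share.list_directories_and_files(directory_name="dir1"):
        print(entry["name"])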
-    @staticmethod
-    def _create_permission_for_share_options(file_permission,  # type: str
-                                             **kwargs):
-        options = {
-            'share_permission': SharePermission(permission=file_permission),
-            'cls': deserialize_permission_key,
-            'timeout': kwargs.pop('timeout', None),
-        }
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def create_permission_for_share(self, file_permission,  # type: str
-                                    **kwargs  # type: Any
-                                    ):
-        # type: (...) -> str
-        """Create a permission (a security descriptor) at the share level.
-
-        This 'permission' can be used for the files/directories in the share.
-        If a matching permission already exists, its key is returned; otherwise,
-        a new permission is created at the share level and its key is returned.
-
-        :param str file_permission:
-            File permission, a Portable SDDL
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A file permission key
-        :rtype: str
-        """
-        timeout = kwargs.pop('timeout', None)
-        options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
-        try:
-            return self._client.share.create_permission(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def get_permission_for_share(  # type: ignore
-            self, permission_key,  # type: str
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> str
-        """Get a permission (a security descriptor) for a given key.
-
-        This 'permission' can be used for the files/directories in the share.
-
-        :param str permission_key:
-            Key of the file permission to retrieve
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A file permission (a portable SDDL)
-        :rtype: str
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return self._client.share.get_permission(  # type: ignore
-                file_permission_key=permission_key,
-                cls=deserialize_permission,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def create_directory(self, directory_name, **kwargs):
-        # type: (str, Any) -> ShareDirectoryClient
-        """Creates a directory in the share and returns a client to interact
-        with the directory.
-
-        :param str directory_name:
-            The name of the directory.
-        :keyword metadata:
-            Name-value pairs associated with the directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: ShareDirectoryClient
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-        """
-        directory = self.get_directory_client(directory_name)
-        kwargs.setdefault('merge_span', True)
-        directory.create_directory(**kwargs)
-        return directory # type: ignore
-
-    @distributed_trace
-    def delete_directory(self, directory_name, **kwargs):
-        # type: (str, Any) -> None
-        """Marks the directory for deletion. The directory is
-        later deleted during garbage collection.
-
-        :param str directory_name:
-            The name of the directory.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        directory = self.get_directory_client(directory_name)
-        directory.delete_directory(**kwargs)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_service_client.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_service_client.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_service_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_share_service_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,373 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Dict, List,
-    TYPE_CHECKING
-)
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse # type: ignore
-
-from azure.core.paging import ItemPaged
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import Pipeline
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.response_handlers import process_storage_error
-from ._generated import AzureFileStorage
-from ._generated.models import StorageErrorException, StorageServiceProperties
-from ._generated.version import VERSION
-from ._share_client import ShareClient
-from ._serialize import get_api_version
-from ._models import (
-    SharePropertiesPaged,
-    service_properties_deserialize,
-)
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from ._models import (
-        ShareProperties,
-        Metrics,
-        CorsRule,
-    )
-
-
-class ShareServiceClient(StorageAccountHostsMixin):
-    """A client to interact with the File Share Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete shares within the account.
-    For operations relating to a specific share, a client for that entity
-    can also be retrieved using the :func:`get_share_client` function.
-
-    :param str account_url:
-        The URL to the file share storage account. Any other entities included
-        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/file_samples_authentication.py
-            :start-after: [START create_share_service_client]
-            :end-before: [END create_share_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Create the share service client with url and credential.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-        if hasattr(credential, 'get_token'):
-            raise ValueError("Token credentials not supported by the File Share service.")
-
-        _, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError(
-                'You need to provide either an account shared key or SAS token when creating a storage service.')
-        self._query_str, credential = self._format_query_string(sas_token, credential)
-        super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            credential=None, # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> ShareServiceClient
-        """Create ShareServiceClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A File Share service client.
-        :rtype: ~azure.storage.fileshare.ShareServiceClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_authentication.py
-                :start-after: [START create_share_service_client_from_conn_string]
-                :end-before: [END create_share_service_client_from_conn_string]
-                :language: python
-                :dedent: 8
-                :caption: Create the share service client with connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(account_url, credential=credential, **kwargs)
-
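A sketch mirroring the share-level client above, now at the account level
(same placeholder connection string as before):

    from azure.storage.fileshare import ShareServiceClient

    service = ShareServiceClient.from_connection_string(conn_str)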
-    @distributed_trace
-    def get_service_properties(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the properties of a storage account's File Share service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A dictionary containing file service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START get_service_properties]
-                :end-before: [END get_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Get file share service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def set_service_properties(
-            self, hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's File Share service, including
-        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for files.
-        :type hour_metrics: ~azure.storage.fileshare.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for files.
-        :type minute_metrics: ~azure.storage.fileshare.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.fileshare.CorsRule`)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START set_service_properties]
-                :end-before: [END set_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Sets file share service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        props = StorageServiceProperties(
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors
-        )
-        try:
-            self._client.service.set_properties(props, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_shares(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            include_snapshots=False, # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> ItemPaged[ShareProperties]
-        """Returns auto-paging iterable of dict-like ShareProperties under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only shares whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param bool include_snapshots:
-            Specifies that share snapshots be returned in the response.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) of ShareProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START fsc_list_shares]
-                :end-before: [END fsc_list_shares]
-                :language: python
-                :dedent: 12
-                :caption: List shares in the file share service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        include = []
-        if include_metadata:
-            include.append('metadata')
-        if include_snapshots:
-            include.append('snapshots')
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.service.list_shares_segment,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=SharePropertiesPaged)
-
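A listing sketch, assuming the `service` client from the previous example:

    for share_item in service.list_shares(include_metadata=True):
        print(share_item.name, share_item.metadata)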
-    @distributed_trace
-    def create_share(
-            self, share_name,  # type: str
-            **kwargs
-        ):
-        # type: (...) -> ShareClient
-        """Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails. Returns a client with
-        which to interact with the newly created share.
-
-        :param str share_name: The name of the share to create.
-        :keyword dict(str,str) metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :keyword int quota:
-            Quota in gigabytes.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.fileshare.ShareClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START fsc_create_shares]
-                :end-before: [END fsc_create_shares]
-                :language: python
-                :dedent: 8
-                :caption: Create a share in the file share service.
-        """
-        metadata = kwargs.pop('metadata', None)
-        quota = kwargs.pop('quota', None)
-        timeout = kwargs.pop('timeout', None)
-        share = self.get_share_client(share_name)
-        kwargs.setdefault('merge_span', True)
-        share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
-        return share
-
-    @distributed_trace
-    def delete_share(
-            self, share_name,  # type: Union[ShareProperties, str]
-            delete_snapshots=False, # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified share for deletion. The share is
-        later deleted during garbage collection.
-
-        :param share_name:
-            The share to delete. This can either be the name of the share,
-            or an instance of ShareProperties.
-        :type share_name: str or ~azure.storage.fileshare.ShareProperties
-        :param bool delete_snapshots:
-            Indicates if snapshots are to be deleted.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START fsc_delete_shares]
-                :end-before: [END fsc_delete_shares]
-                :language: python
-                :dedent: 12
-                :caption: Delete a share in the file share service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        share = self.get_share_client(share_name)
-        kwargs.setdefault('merge_span', True)
-        share.delete_share(
-            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
-
-    def get_share_client(self, share, snapshot=None):
-        # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient
-        """Get a client to interact with the specified share.
-        The share need not already exist.
-
-        :param share:
-            The share. This can either be the name of the share,
-            or an instance of ShareProperties.
-        :type share: str or ~azure.storage.fileshare.ShareProperties
-        :param str snapshot:
-            An optional share snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`.
-        :returns: A ShareClient.
-        :rtype: ~azure.storage.fileshare.ShareClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service.py
-                :start-after: [START get_share_client]
-                :end-before: [END get_share_client]
-                :language: python
-                :dedent: 8
-                :caption: Gets the share client.
-        """
-        try:
-            share_name = share.name
-        except AttributeError:
-            share_name = share
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareClient(
-            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
-            api_version=self.api_version, _hosts=self._hosts,
-            _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,56 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-
-try:
-    from urllib.parse import quote, unquote
-except ImportError:
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-
-def url_quote(url):
-    return quote(url)
-
-
-def url_unquote(url):
-    return unquote(url)
-
-
-def encode_base64(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def decode_base64_to_bytes(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def decode_base64_to_text(data):
-    decoded_bytes = decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, six.text_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, six.text_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = encode_base64(digest)
-    return encoded_digest
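For reference, sign_string with key_is_base64=True is equivalent to this
stdlib-only sketch (the key and string-to-sign are placeholders):

    import base64, hashlib, hmac

    key = base64.b64decode("<base64-account-key>")
    digest = hmac.new(key, b"<string-to-sign>", hashlib.sha256).digest()
    signature = base64.b64encode(digest).decode("utf-8")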
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/authentication.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/authentication.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/authentication.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/authentication.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-import sys
-
-try:
-    from urllib.parse import urlparse, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import unquote # type: ignore
-
-try:
-    from yarl import URL
-except ImportError:
-    pass
-
-try:
-    from azure.core.pipeline.transport import AioHttpTransport
-except ImportError:
-    AioHttpTransport = None
-
-from azure.core.exceptions import ClientAuthenticationError
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-
-from . import sign_string
-
-
-logger = logging.getLogger(__name__)
-
-
-
-# wraps a given exception with the desired exception type
-def _wrap_exception(ex, desired_type):
-    msg = ""
-    if ex.args:
-        msg = ex.args[0]
-    if sys.version_info >= (3,):
-        # Automatic chaining in Python 3 means we keep the trace
-        return desired_type(msg)
-    # There isn't a good solution in 2 for keeping the stack trace
-    # in general, or that will not result in an error in 3
-    # However, we can keep the previous error type and message
-    # TODO: In the future we will log the trace
-    return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
-
-
-class AzureSigningError(ClientAuthenticationError):
-    """
-    Represents a fatal error when attempting to sign a request.
-    In general, the cause of this exception is user error. For example, the given account key is not valid.
-    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
-    """
-
-
-# pylint: disable=no-self-use
-class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
-
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-        super(SharedKeyCredentialPolicy, self).__init__()
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.http_request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = urlparse(request.http_request.url).path
-        try:
-            if isinstance(request.context.transport, AioHttpTransport) or \
-                isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
-                uri_path = URL(uri_path)
-                return '/' + self.account_name + str(uri_path)
-        except TypeError:
-            pass
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.http_request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.http_request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
-
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        try:
-            signature = sign_string(self.account_key, string_to_sign)
-            auth_string = 'SharedKey ' + self.account_name + ':' + signature
-            request.http_request.headers['Authorization'] = auth_string
-        except Exception as ex:
-            # Wrap any error that occurred as signing error
-            # Doing so will clarify/locate the source of problem
-            raise _wrap_exception(ex, AzureSigningError)
-
-    def on_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        #logger.debug("String_to_sign=%s", string_to_sign)
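Schematically, on_request assembles the canonical Shared Key string-to-sign;
the values below are illustrative, not from a real request:

    string_to_sign = (
        "GET\n"                                     # verb
        "\n\n\n\n\n\n\n\n\n\n\n"                    # 11 standard header slots, empty here
        "x-ms-date:Mon, 01 Jan 2024 00:00:00 GMT\n"
        "x-ms-version:2019-07-07\n"                 # canonicalized x-ms-* headers
        "/myaccount/myshare"                        # canonicalized resource
        "\nrestype:share"                           # canonicalized query, lowercased names
    )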
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,429 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union,
-    Optional,
-    Any,
-    Iterable,
-    Dict,
-    List,
-    Type,
-    Tuple,
-    TYPE_CHECKING,
-)
-import logging
-
-try:
-    from urllib.parse import parse_qs, quote
-except ImportError:
-    from urlparse import parse_qs  # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-
-from azure.core.configuration import Configuration
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline import Pipeline
-from azure.core.pipeline.transport import RequestsTransport, HttpTransport
-from azure.core.pipeline.policies import (
-    RedirectPolicy,
-    ContentDecodePolicy,
-    BearerTokenCredentialPolicy,
-    ProxyPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-    UserAgentPolicy,
-)
-
-from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .models import LocationMode
-from .authentication import SharedKeyCredentialPolicy
-from .shared_access_signature import QueryStringConstants
-from .policies import (
-    StorageHeadersPolicy,
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageResponseHook,
-    StorageLoggingPolicy,
-    StorageHosts,
-    QueueMessagePolicy,
-    ExponentialRetry,
-)
-from .._version import VERSION
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-
-_LOGGER = logging.getLogger(__name__)
-_SERVICE_PARAMS = {
-    "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-    "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"},
-    "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"},
-    "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"},
-}
-
-
-class StorageAccountHostsMixin(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        parsed_url,  # type: Any
-        service,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
-        self._hosts = kwargs.get("_hosts")
-        self.scheme = parsed_url.scheme
-
-        if service not in ["blob", "queue", "file-share", "dfs"]:
-            raise ValueError("Invalid service: {}".format(service))
-        service_name = service.split('-')[0]
-        account = parsed_url.netloc.split(".{}.core.".format(service_name))
-        self.account_name = account[0] if len(account) > 1 else None
-        secondary_hostname = None
-
-        self.credential = format_shared_key_credential(account, credential)
-        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
-            raise ValueError("Token credential is only supported with HTTPS.")
-        if hasattr(self.credential, "account_name"):
-            self.account_name = self.credential.account_name
-            secondary_hostname = "{}-secondary.{}.{}".format(
-                self.credential.account_name, service_name, SERVICE_HOST_BASE)
-
-        if not self._hosts:
-            if len(account) > 1:
-                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
-            if kwargs.get("secondary_hostname"):
-                secondary_hostname = kwargs["secondary_hostname"]
-            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
-            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
-
-        self.require_encryption = kwargs.get("require_encryption", False)
-        self.key_encryption_key = kwargs.get("key_encryption_key")
-        self.key_resolver_function = kwargs.get("key_resolver_function")
-        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
-
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-
-    def __exit__(self, *args):
-        self._client.__exit__(*args)
-
-    def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        self._client.close()
-
-    @property
-    def url(self):
-        """The full endpoint URL to this entity, including SAS token if used.
-
-        This could be either the primary endpoint,
-        or the secondary endpoint depending on the current :func:`location_mode`.
-        """
-        return self._format_url(self._hosts[self._location_mode])
-
-    @property
-    def primary_endpoint(self):
-        """The full primary endpoint URL.
-
-        :type: str
-        """
-        return self._format_url(self._hosts[LocationMode.PRIMARY])
-
-    @property
-    def primary_hostname(self):
-        """The hostname of the primary endpoint.
-
-        :type: str
-        """
-        return self._hosts[LocationMode.PRIMARY]
-
-    @property
-    def secondary_endpoint(self):
-        """The full secondary endpoint URL if configured.
-
-        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str
-        :raise ValueError:
-        """
-        if not self._hosts[LocationMode.SECONDARY]:
-            raise ValueError("No secondary host configured.")
-        return self._format_url(self._hosts[LocationMode.SECONDARY])
-
-    @property
-    def secondary_hostname(self):
-        """The hostname of the secondary endpoint.
-
-        If not available, this will be None. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str or None
-        """
-        return self._hosts[LocationMode.SECONDARY]
-
-    @property
-    def location_mode(self):
-        """The location mode that the client is currently using.
-
-        By default this will be "primary". Options include "primary" and "secondary".
-
-        :type: str
-        """
-
-        return self._location_mode
-
-    @location_mode.setter
-    def location_mode(self, value):
-        if self._hosts.get(value):
-            self._location_mode = value
-            self._client._config.url = self.url  # pylint: disable=protected-access
-        else:
-            raise ValueError("No host URL for location mode: {}".format(value))
-
-    @property
-    def api_version(self):
-        """The version of the Storage API used for requests.
-
-        :type: str
-        """
-        return self._client._config.version  # pylint: disable=protected-access
-
-    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
-        query_str = "?"
-        if snapshot:
-            query_str += "snapshot={}&".format(self.snapshot)
-        if share_snapshot:
-            query_str += "sharesnapshot={}&".format(self.snapshot)
-        if sas_token and not credential:
-            query_str += sas_token
-        elif is_credential_sastoken(credential):
-            query_str += credential.lstrip("?")
-            credential = None
-        return query_str.rstrip("?&"), credential
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, "get_token"):
-            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-
-        config = kwargs.get("_configuration") or create_configuration(**kwargs)
-        if kwargs.get("_pipeline"):
-            return config, kwargs["_pipeline"]
-        config.transport = kwargs.get("transport")  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            config.transport = RequestsTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            RedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs),
-            config.retry_policy,
-            config.logging_policy,
-            StorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs)
-        ]
-        return config, Pipeline(config.transport, policies=policies)
-
-    def _batch_send(
-        self, *reqs,  # type: HttpRequest
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts()
-            if raise_on_any_failure:
-                parts = list(response.parts())
-                if any(p for p in parts if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts
-                    )
-                    raise error
-                return iter(parts)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-class TransportWrapper(HttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, transport):
-        self._transport = transport
-
-    def send(self, request, **kwargs):
-        return self._transport.send(request, **kwargs)
-
-    def open(self):
-        pass
-
-    def close(self):
-        pass
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, *args):  # pylint: disable=arguments-differ
-        pass
-
-
-def format_shared_key_credential(account, credential):
-    if isinstance(credential, six.string_types):
-        if len(account) < 2:
-            raise ValueError("Unable to determine account name for shared key credential.")
-        credential = {"account_name": account[0], "account_key": credential}
-    if isinstance(credential, dict):
-        if "account_name" not in credential:
-            raise ValueError("Shared key credential missing 'account_name")
-        if "account_key" not in credential:
-            raise ValueError("Shared key credential missing 'account_key")
-        return SharedKeyCredentialPolicy(**credential)
-    return credential
-
-
-def parse_connection_str(conn_str, credential, service):
-    conn_str = conn_str.rstrip(";")
-    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
-    if any(len(tup) != 2 for tup in conn_settings):
-        raise ValueError("Connection string is either blank or malformed.")
-    conn_settings = dict(conn_settings)
-    endpoints = _SERVICE_PARAMS[service]
-    primary = None
-    secondary = None
-    if not credential:
-        try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
-        except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
-    if endpoints["primary"] in conn_settings:
-        primary = conn_settings[endpoints["primary"]]
-        if endpoints["secondary"] in conn_settings:
-            secondary = conn_settings[endpoints["secondary"]]
-    else:
-        if endpoints["secondary"] in conn_settings:
-            raise ValueError("Connection string specifies only secondary endpoint.")
-        try:
-            primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
-                service,
-                conn_settings["EndpointSuffix"],
-            )
-            secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
-            )
-        except KeyError:
-            pass
-
-    if not primary:
-        try:
-            primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
-            )
-        except KeyError:
-            raise ValueError("Connection string missing required connection details.")
-    return primary, secondary, credential
-
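A worked example with a fake account and key: the DefaultEndpointsProtocol form derives both endpoints and an account-key credential dict.

    conn = ("DefaultEndpointsProtocol=https;AccountName=mystorageacct;"
            "AccountKey=bXlmYWtla2V5;EndpointSuffix=core.windows.net")
    primary, secondary, cred = parse_connection_str(conn, credential=None, service="file")
    # primary   == "https://mystorageacct.file.core.windows.net"
    # secondary == "mystorageacct-secondary.file.core.windows.net"
    # cred      == {"account_name": "mystorageacct", "account_key": "bXlmYWtla2V5"}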
-
-def create_configuration(**kwargs):
-    # type: (**Any) -> Configuration
-    config = Configuration(**kwargs)
-    config.headers_policy = StorageHeadersPolicy(**kwargs)
-    config.user_agent_policy = UserAgentPolicy(
-        sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs)
-    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-    config.logging_policy = StorageLoggingPolicy(**kwargs)
-    config.proxy_policy = ProxyPolicy(**kwargs)
-
-    # Storage settings
-    config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
-    config.copy_polling_interval = 15
-
-    # Block blob uploads
-    config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
-    config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
-    config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
-
-    # Page blob uploads
-    config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
-
-    # Blob downloads
-    config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
-    config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
-
-    # File uploads
-    config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
-    return config
-
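For example, any of the tuning knobs above can be overridden per client while the rest keep their defaults (`storage_sdk` is required because it is popped for the user-agent moniker):

    config = create_configuration(storage_sdk="file", max_range_size=1024 * 1024)
    assert config.max_range_size == 1024 * 1024            # overridden
    assert config.max_single_get_size == 32 * 1024 * 1024  # default retained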
-
-def parse_query(query_str):
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
-    sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
-    sas_token = None
-    if sas_params:
-        sas_token = "&".join(sas_params)
-
-    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
-    return snapshot, sas_token
-
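An illustration with made-up SAS values: the snapshot identifier and the SAS parameters are separated, and only recognized SAS keys are kept (re-encoded):

    snapshot, sas = parse_query("sharesnapshot=2020-01-01T00:00:00Z&sv=2019-07-07&sig=abc%3D")
    # snapshot == "2020-01-01T00:00:00Z"
    # sas      == "sv=2019-07-07&sig=abc%3D"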
-
-def is_credential_sastoken(credential):
-    if not credential or not isinstance(credential, six.string_types):
-        return False
-
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = parse_qs(credential.lstrip("?"))
-    if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
-        return True
-    return False
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/base_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,176 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-from azure.core.pipeline import AsyncPipeline
-from azure.core.async_paging import AsyncList
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline.policies import (
-    ContentDecodePolicy,
-    AsyncBearerTokenCredentialPolicy,
-    AsyncRedirectPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-from azure.core.pipeline.transport import AsyncHttpTransport
-
-from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .authentication import SharedKeyCredentialPolicy
-from .base_client import create_configuration
-from .policies import (
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageHosts,
-    StorageHeadersPolicy,
-    QueueMessagePolicy
-)
-from .policies_async import AsyncStorageResponseHook
-
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import Pipeline
-    from azure.core.pipeline.transport import HttpRequest
-    from azure.core.configuration import Configuration
-_LOGGER = logging.getLogger(__name__)
-
-
-class AsyncStorageAccountHostsMixin(object):
-
-    def __enter__(self):
-        raise TypeError("Async client only supports 'async with'.")
-
-    def __exit__(self, *args):
-        pass
-
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-
-    async def __aexit__(self, *args):
-        await self._client.__aexit__(*args)
-
-    async def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when the client is used as a context manager.
-        """
-        await self._client.close()
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, 'get_token'):
-            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-        config = kwargs.get('_configuration') or create_configuration(**kwargs)
-        if kwargs.get('_pipeline'):
-            return config, kwargs['_pipeline']
-        config.transport = kwargs.get('transport')  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            try:
-                from azure.core.pipeline.transport import AioHttpTransport
-            except ImportError:
-                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
-            config.transport = AioHttpTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            AsyncRedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
-            config.retry_policy,
-            config.logging_policy,
-            AsyncStorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs),
-        ]
-        return config, AsyncPipeline(config.transport, policies=policies)
-
-    async def _batch_send(
-        self, *reqs: 'HttpRequest',
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = await self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts() # Return an AsyncIterator
-            if raise_on_any_failure:
-                parts_list = []
-                async for part in parts:
-                    parts_list.append(part)
-                if any(p for p in parts_list if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts_list
-                    )
-                    raise error
-                return AsyncList(parts_list)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
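A hedged usage sketch (`report_batch` is hypothetical): with `raise_on_any_failure=False`, the awaited call returns an async iterator of per-sub-request responses.

    async def report_batch(client, requests):
        parts = await client._batch_send(*requests, raise_on_any_failure=False)
        async for part in parts:  # one HttpResponse per sub-request, success or not
            print(part.status_code, part.headers.get("x-ms-request-id"))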
-
-class AsyncTransportWrapper(AsyncHttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, async_transport):
-        self._transport = async_transport
-
-    async def send(self, request, **kwargs):
-        return await self._transport.send(request, **kwargs)
-
-    async def open(self):
-        pass
-
-    async def close(self):
-        pass
-
-    async def __aenter__(self):
-        pass
-
-    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
-        pass
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/constants.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/constants.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-from .._generated.version import VERSION
-
-
-X_MS_VERSION = VERSION
-
-# Socket timeout in seconds
-CONNECTION_TIMEOUT = 20
-READ_TIMEOUT = 20
-
-# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
-# The socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
-    # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
-    READ_TIMEOUT = 2000
-
-STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
-
-SERVICE_HOST_BASE = 'core.windows.net'
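For the record, the arithmetic behind the 2000-second figure (1024-based units give 2048 s, which the constant rounds to an even 2000):

    print((100 * 1024 * 1024) / (50 * 1024))  # 2048.0 seconds for 100MB at 50KB/s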
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/encryption.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/encryption.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,542 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from os import urandom
-from json import (
-    dumps,
-    loads,
-)
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from azure.core.exceptions import HttpResponseError
-
-from .._version import VERSION
-from . import encode_base64, decode_base64_to_bytes
-
-
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError('{0} should not be None.'.format(param_name))
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier,
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
-
-    return encryption_data_dict
-
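Serialized, the dict above yields JSON of this shape (values abbreviated; the wrapped key and IV are base64 strings):

    {
        "WrappedContentKey": {"KeyId": "...", "EncryptedKey": "...", "Algorithm": "..."},
        "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
        "ContentEncryptionIV": "...",
        "KeyWrappingMetadata": {"EncryptionLibrary": "Python <VERSION>"},
    }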
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError("Unsupported encryption version.")
-    except KeyError:
-        raise ValueError("Unsupported encryption version.")
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
-        raise ValueError('Encryption version is not supported.')
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
-        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
-
-
-def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
-    Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
-
-
-def encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the upload_data_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
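A self-contained sketch with a toy key-encryption-key (`_IdentityKek` is illustrative only; its "wrap" is a no-op, which no real KEK should do):

    class _IdentityKek:
        def wrap_key(self, key):
            return key  # a real KEK would wrap with RSA-OAEP or AES key wrap
        def get_key_wrap_algorithm(self):
            return "identity"
        def get_kid(self):
            return "local:identity-kek"

    metadata_json, ciphertext = encrypt_blob(b"hello blob", _IdentityKek())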
-
-def generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-
-    :param object key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                 content, start_offset, end_offset, response_headers):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
-    except:  # pylint: disable=bare-except
-        if require_encryption:
-            raise ValueError(
-                'Encryption required, but received data does not contain appropriate metadata. ' + \
-                'Data was either not encrypted or metadata has been lost.')
-
-        return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    blob_type = response_headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    if 'content-range' in response_headers:
-        content_range = response_headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
-
-
-def encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted EncryptedQueueMessage contents with all associated metadata.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted and so was not encrypted
-        # or the user provided a json formatted message.
-        if require_encryption:
-            raise ValueError('Message was not encrypted.')
-
-        return message
-    try:
-        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception as error:
-        raise HttpResponseError(
-            message="Decryption failed.",
-            response=response,
-            error=error)
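A round-trip sketch reusing the toy `_IdentityKek` from the encrypt_blob example above, extended with the `unwrap_key` method required on the decryption side:

    class _IdentityKekFull(_IdentityKek):
        def unwrap_key(self, key, algorithm):
            return key  # reverses the no-op wrap

    kek = _IdentityKekFull()
    wire = encrypt_queue_message("hi", kek)
    assert decrypt_queue_message(wire, None, True, kek, None) == "hi"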
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/models.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/models.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,449 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-def get_enum_value(value):
-    if value is None or value in ["None", ""]:
-        return None
-    try:
-        return value.value
-    except AttributeError:
-        return value
-
-
-class StorageErrorCode(str, Enum):
-
-    # Generic storage values
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    no_authentication_information = "NoAuthenticationInformation"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-
-    # Blob values
-    append_position_condition_not_met = "AppendPositionConditionNotMet"
-    blob_already_exists = "BlobAlreadyExists"
-    blob_not_found = "BlobNotFound"
-    blob_overwritten = "BlobOverwritten"
-    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
-    block_count_exceeds_limit = "BlockCountExceedsLimit"
-    block_list_too_long = "BlockListTooLong"
-    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
-    cannot_verify_copy_source = "CannotVerifyCopySource"
-    container_already_exists = "ContainerAlreadyExists"
-    container_being_deleted = "ContainerBeingDeleted"
-    container_disabled = "ContainerDisabled"
-    container_not_found = "ContainerNotFound"
-    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
-    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
-    copy_id_mismatch = "CopyIdMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
-    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
-    incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
-    infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
-    invalid_blob_or_block = "InvalidBlobOrBlock"
-    invalid_blob_tier = "InvalidBlobTier"
-    invalid_blob_type = "InvalidBlobType"
-    invalid_block_id = "InvalidBlockId"
-    invalid_block_list = "InvalidBlockList"
-    invalid_operation = "InvalidOperation"
-    invalid_page_range = "InvalidPageRange"
-    invalid_source_blob_type = "InvalidSourceBlobType"
-    invalid_source_blob_url = "InvalidSourceBlobUrl"
-    invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
-    lease_already_present = "LeaseAlreadyPresent"
-    lease_already_broken = "LeaseAlreadyBroken"
-    lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
-    lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
-    lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
-    lease_id_missing = "LeaseIdMissing"
-    lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
-    lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
-    lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
-    lease_lost = "LeaseLost"
-    lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
-    lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
-    lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
-    max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
-    no_pending_copy_operation = "NoPendingCopyOperation"
-    operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
-    pending_copy_operation = "PendingCopyOperation"
-    previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
-    previous_snapshot_not_found = "PreviousSnapshotNotFound"
-    previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
-    sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
-    sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
-    snapshot_count_exceeded = "SnapshotCountExceeded"
-    snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
-    snapshots_present = "SnapshotsPresent"
-    source_condition_not_met = "SourceConditionNotMet"
-    system_in_use = "SystemInUse"
-    target_condition_not_met = "TargetConditionNotMet"
-    unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
-    blob_being_rehydrated = "BlobBeingRehydrated"
-    blob_archived = "BlobArchived"
-    blob_not_archived = "BlobNotArchived"
-
-    # Queue values
-    invalid_marker = "InvalidMarker"
-    message_not_found = "MessageNotFound"
-    message_too_large = "MessageTooLarge"
-    pop_receipt_mismatch = "PopReceiptMismatch"
-    queue_already_exists = "QueueAlreadyExists"
-    queue_being_deleted = "QueueBeingDeleted"
-    queue_disabled = "QueueDisabled"
-    queue_not_empty = "QueueNotEmpty"
-    queue_not_found = "QueueNotFound"
-
-    # File values
-    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
-    client_cache_flush_delay = "ClientCacheFlushDelay"
-    delete_pending = "DeletePending"
-    directory_not_empty = "DirectoryNotEmpty"
-    file_lock_conflict = "FileLockConflict"
-    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
-    parent_not_found = "ParentNotFound"
-    read_only_attribute = "ReadOnlyAttribute"
-    share_already_exists = "ShareAlreadyExists"
-    share_being_deleted = "ShareBeingDeleted"
-    share_disabled = "ShareDisabled"
-    share_not_found = "ShareNotFound"
-    sharing_violation = "SharingViolation"
-    share_snapshot_in_progress = "ShareSnapshotInProgress"
-    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
-    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
-    share_has_snapshots = "ShareHasSnapshots"
-    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
-
-    # DataLake values
-    content_length_must_be_zero = 'ContentLengthMustBeZero'
-    path_already_exists = 'PathAlreadyExists'
-    invalid_flush_position = 'InvalidFlushPosition'
-    invalid_property_name = 'InvalidPropertyName'
-    invalid_source_uri = 'InvalidSourceUri'
-    unsupported_rest_version = 'UnsupportedRestVersion'
-    file_system_not_found = 'FilesystemNotFound'
-    path_not_found = 'PathNotFound'
-    rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound'
-    source_path_not_found = 'SourcePathNotFound'
-    destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted'
-    file_system_already_exists = 'FilesystemAlreadyExists'
-    file_system_being_deleted = 'FilesystemBeingDeleted'
-    invalid_destination_path = 'InvalidDestinationPath'
-    invalid_rename_source_path = 'InvalidRenameSourcePath'
-    invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType'
-    lease_is_already_broken = 'LeaseIsAlreadyBroken'
-    lease_name_mismatch = 'LeaseNameMismatch'
-    path_conflict = 'PathConflict'
-    source_path_is_being_deleted = 'SourcePathIsBeingDeleted'
-
-
-class DictMixin(object):
-
-    def __setitem__(self, key, item):
-        self.__dict__[key] = item
-
-    def __getitem__(self, key):
-        return self.__dict__[key]
-
-    def __repr__(self):
-        return str(self)
-
-    def __len__(self):
-        return len(self.keys())
-
-    def __delitem__(self, key):
-        self.__dict__[key] = None
-
-    def __eq__(self, other):
-        """Compare objects by comparing all attributes."""
-        if isinstance(other, self.__class__):
-            return self.__dict__ == other.__dict__
-        return False
-
-    def __ne__(self, other):
-        """Compare objects by comparing all attributes."""
-        return not self.__eq__(other)
-
-    def __str__(self):
-        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
-
-    def has_key(self, k):
-        return k in self.__dict__
-
-    def update(self, *args, **kwargs):
-        return self.__dict__.update(*args, **kwargs)
-
-    def keys(self):
-        return [k for k in self.__dict__ if not k.startswith('_')]
-
-    def values(self):
-        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def items(self):
-        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def get(self, key, default=None):
-        if key in self.__dict__:
-            return self.__dict__[key]
-        return default
-
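An illustration (`_Props` is hypothetical): model classes mix this in so instances expose their public attributes with a dict-like interface.

    class _Props(DictMixin):
        def __init__(self):
            self.name = "share1"
            self._internal = object()  # underscore attributes are hidden

    p = _Props()
    assert p["name"] == "share1"
    assert p.keys() == ["name"]  # '_internal' is filtered out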
-
-class LocationMode(object):
-    """
-    Specifies the location the request should be sent to. This mode only applies
-    for RA-GRS accounts which allow secondary read access. All other account types
-    must use PRIMARY.
-    """
-
-    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
-    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
-
-
-class ResourceTypes(object):
-    """
-    Specifies the resource types that are accessible with the account SAS.
-
-    :param bool service:
-        Access to service-level APIs (e.g., Get/Set Service Properties,
-        Get Service Stats, List Containers/Queues/Shares)
-    :param bool container:
-        Access to container-level APIs (e.g., Create/Delete Container,
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories)
-    :param bool object:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    """
-
-    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
-        self.service = service
-        self.container = container
-        self.object = object
-        self._str = (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create a ResourceTypes from a string.
-
-        To specify service, container, or object you need only to
-        include the first letter of the word in the string. E.g. for service and container,
-        you would provide a string "sc".
-
-        :param str string: Specify service, container, or object in
-            the string with the first letter of the word.
-        :return: A ResourceTypes object
-        :rtype: ~azure.storage.fileshare.ResourceTypes
-        """
-        res_service = 's' in string
-        res_container = 'c' in string
-        res_object = 'o' in string
-
-        parsed = cls(res_service, res_container, res_object)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
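Concretely, the "sc" example from the docstring:

    rt = ResourceTypes.from_string("sc")
    assert rt.service and rt.container and not rt.object
    assert str(rt) == "sc"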
-
-class AccountSasPermissions(object):
-    """
-    :class:`~AccountSasPermissions` class to be used with generate_account_sas
-    function and for the AccessPolicies used with set_*_acl. There are two types of
-    SAS which may be used to grant resource access. One is to grant access to a
-    specific resource (resource-specific). Another is to grant access to the
-    entire service for a specific account and allow certain operations based on
-    perms found here.
-
-    :param bool read:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits read permissions to the specified resource type.
-    :param bool write:
-        Valid for all signed resources types (Service, Container, and Object).
-        Permits write permissions to the specified resource type.
-    :param bool delete:
-        Valid for Container and Object resource types, except for queue messages.
-    :param bool list:
-        Valid for Service and Container resource types only.
-    :param bool add:
-        Valid for the following Object resource types only: queue messages, and append blobs.
-    :param bool create:
-        Valid for the following Object resource types only: blobs and files.
-        Users can create new blobs or files, but may not overwrite existing
-        blobs or files.
-    :param bool update:
-        Valid for the following Object resource types only: queue messages.
-    :param bool process:
-        Valid for the following Object resource type only: queue messages.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
-                 add=False, create=False, update=False, process=False):
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self.add = add
-        self.create = create
-        self.update = update
-        self.process = process
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else '') +
-                     ('a' if self.add else '') +
-                     ('c' if self.create else '') +
-                     ('u' if self.update else '') +
-                     ('p' if self.process else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create AccountSasPermissions from a string.
-
-        To specify read, write, delete, etc. permissions you need only to
-        include the first letter of the word in the string. E.g. for read and write
-        permissions you would provide a string "rw".
-
-        :param str permission: Specify permissions in
-            the string with the first letter of the word.
-        :return: An AccountSasPermissions object
-        :rtype: ~azure.storage.fileshare.AccountSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-        p_add = 'a' in permission
-        p_create = 'c' in permission
-        p_update = 'u' in permission
-        p_process = 'p' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-
-class Services(object):
-    """Specifies the services accessible with the account SAS.
-
-    :param bool blob:
-        Access for the `~azure.storage.blob.BlobServiceClient`
-    :param bool queue:
-        Access for the `~azure.storage.queue.QueueServiceClient`
-    :param bool fileshare:
-        Access for the `~azure.storage.fileshare.ShareServiceClient`
-    """
-
-    def __init__(self, blob=False, queue=False, fileshare=False):
-        self.blob = blob
-        self.queue = queue
-        self.fileshare = fileshare
-        self._str = (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('f' if self.fileshare else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create Services from a string.
-
-        To specify blob, queue, or file you need only to
-        include the first letter of the word in the string. E.g. for blob and queue
-        you would provide a string "bq".
-
-        :param str string: Specify blob, queue, or file in
-            the string with the first letter of the word.
-        :return: A Services object
-        :rtype: ~azure.storage.fileshare.Services
-        """
-        res_blob = 'b' in string
-        res_queue = 'q' in string
-        res_file = 'f' in string
-
-        parsed = cls(res_blob, res_queue, res_file)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/parser.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/parser.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/parser.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/parser.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,610 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-import types
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode # type: ignore
-    from urlparse import ( # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):
-    """Is this method/status code retryable? (Based on whitelists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon in the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
-        if status in [501, 505]:
-            return False
-        return True
-    return False
-
-
-def urljoin(base_url, stub_url):
-    parsed = urlparse(base_url)
-    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
-    return parsed.geturl()
-
-
-class QueueMessagePolicy(SansIOHTTPPolicy):
-
-    def on_request(self, request):
-        message_id = request.context.options.pop('queue_message_id', None)
-        if message_id:
-            request.http_request.url = urljoin(
-                request.http_request.url,
-                message_id)
-
-
-class StorageHeadersPolicy(HeadersPolicy):
-    request_id_header_name = 'x-ms-client-request-id'
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        super(StorageHeadersPolicy, self).on_request(request)
-        current_time = format_date_time(time())
-        request.http_request.headers['x-ms-date'] = current_time
-
-        custom_id = request.context.options.pop('client_request_id', None)
-        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
-
-    # def on_response(self, request, response):
-    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
-    #     if self.request_id_header_name in response.http_response.headers:
-
-    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
-
-    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
-    #             raise AzureError(
-    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
-    #                 "Service request ID: {}".format(
-    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
-    #                     response.http_response.headers['x-ms-request-id']),
-    #                 response=response.http_response
-    #             )
-
-
-class StorageHosts(SansIOHTTPPolicy):
-
-    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
-        self.hosts = hosts
-        super(StorageHosts, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        request.context.options['hosts'] = self.hosts
-        parsed_url = urlparse(request.http_request.url)
-
-        # Detect what location mode we're currently requesting with
-        location_mode = LocationMode.PRIMARY
-        for key, value in self.hosts.items():
-            if parsed_url.netloc == value:
-                location_mode = key
-
-        # See if a specific location mode has been specified, and if so, redirect
-        use_location = request.context.options.pop('use_location', None)
-        if use_location:
-            # Lock retries to the specific location
-            request.context.options['retry_to_secondary'] = False
-            if use_location not in self.hosts:
-                raise ValueError("Attempting to use undefined host location {}".format(use_location))
-            if use_location != location_mode:
-                # Update request URL to use the specified location
-                updated = parsed_url._replace(netloc=self.hosts[use_location])
-                request.http_request.url = updated.geturl()
-                location_mode = use_location
-
-        request.context.options['location_mode'] = location_mode
-
-
-class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
-    """A policy that logs HTTP request and response to the DEBUG logger.
-
-    This accepts global configuration, and a per-request override via the "logging_enable" option.
-    """
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        http_request = request.http_request
-        options = request.context.options
-        if options.pop("logging_enable", self.enable_http_logger):
-            request.context["logging_enable"] = True
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                log_url = http_request.url
-                query_params = http_request.query
-                if 'sig' in query_params:
-                    log_url = log_url.replace(query_params['sig'], "*****")
-                _LOGGER.debug("Request URL: %r", log_url)
-                _LOGGER.debug("Request method: %r", http_request.method)
-                _LOGGER.debug("Request headers:")
-                for header, value in http_request.headers.items():
-                    if header.lower() == 'authorization':
-                        value = '*****'
-                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
-                        # take the url apart and scrub away the signed signature
-                        scheme, netloc, path, params, query, fragment = urlparse(value)
-                        parsed_qs = dict(parse_qsl(query))
-                        parsed_qs['sig'] = '*****'
-
-                        # the SAS needs to be put back together
-                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
-
-                    _LOGGER.debug("    %r: %r", header, value)
-                _LOGGER.debug("Request body:")
-
-                # We don't want to log the binary data of a file upload.
-                if isinstance(http_request.body, types.GeneratorType):
-                    _LOGGER.debug("File upload")
-                else:
-                    _LOGGER.debug(str(http_request.body))
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log request: %r", err)
-
-    def on_response(self, request, response):
-        # type: (PipelineRequest, PipelineResponse, Any) -> None
-        if response.context.pop("logging_enable", self.enable_http_logger):
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                _LOGGER.debug("Response status: %r", response.http_response.status_code)
-                _LOGGER.debug("Response headers:")
-                for res_header, value in response.http_response.headers.items():
-                    _LOGGER.debug("    %r: %r", res_header, value)
-
-                # We don't want to log binary data if the response is a file.
-                _LOGGER.debug("Response content:")
-                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
-                header = response.http_response.headers.get('content-disposition')
-
-                if header and pattern.match(header):
-                    filename = header.partition('=')[2]
-                    _LOGGER.debug("File attachments: %s", filename)
-                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
-                    _LOGGER.debug("Body contains binary data.")
-                elif response.http_response.headers.get("content-type", "").startswith("image"):
-                    _LOGGER.debug("Body contains image data.")
-                else:
-                    if response.context.options.get('stream', False):
-                        _LOGGER.debug("Body is streamable")
-                    else:
-                        _LOGGER.debug(response.http_response.text())
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log response: %s", repr(err))
-
-
-class StorageRequestHook(SansIOHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._request_callback = kwargs.get('raw_request_hook')
-        super(StorageRequestHook, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, **Any) -> PipelineResponse
-        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
-        if request_callback:
-            request_callback(request)
-
-
-class StorageResponseHook(HTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(StorageResponseHook, self).__init__()
-
-    def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = self.next.send(request)
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-
-class StorageContentValidation(SansIOHTTPPolicy):
-    """A simple policy that sends the given headers
-    with the request.
-
-    This will overwrite any headers already defined in the request.
-    """
-    header_name = 'Content-MD5'
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        super(StorageContentValidation, self).__init__()
-
-    @staticmethod
-    def get_content_md5(data):
-        md5 = hashlib.md5()
-        if isinstance(data, bytes):
-            md5.update(data)
-        elif hasattr(data, 'read'):
-            pos = 0
-            try:
-                pos = data.tell()
-            except:  # pylint: disable=bare-except
-                pass
-            for chunk in iter(lambda: data.read(4096), b""):
-                md5.update(chunk)
-            try:
-                data.seek(pos, SEEK_SET)
-            except (AttributeError, IOError):
-                raise ValueError("Data should be bytes or a seekable file-like object.")
-        else:
-            raise ValueError("Data should be bytes or a seekable file-like object.")
-
-        return md5.digest()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        validate_content = request.context.options.pop('validate_content', False)
-        if validate_content and request.http_request.method != 'GET':
-            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
-            request.http_request.headers[self.header_name] = computed_md5
-            request.context['validate_content_md5'] = computed_md5
-        request.context['validate_content'] = validate_content
-
-    def on_response(self, request, response):
-        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
-            computed_md5 = request.context.get('validate_content_md5') or \
-                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
-            if response.http_response.headers['content-md5'] != computed_md5:
-                raise AzureError(
-                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
-                        response.http_response.headers['content-md5'], computed_md5),
-                    response=response.http_response
-                )
-
-
-class StorageRetryPolicy(HTTPPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    def __init__(self, **kwargs):
-        self.total_retries = kwargs.pop('retry_total', 10)
-        self.connect_retries = kwargs.pop('retry_connect', 3)
-        self.read_retries = kwargs.pop('retry_read', 3)
-        self.status_retries = kwargs.pop('retry_status', 3)
-        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
-        super(StorageRetryPolicy, self).__init__()
-
-    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
-        """
-        A function which sets the next host location on the request, if applicable.
-
-        :param dict settings:
-            The retry settings containing the current host location mode.
-        :param request: The request to evaluate and possibly modify.
-        """
-        if settings['hosts'] and all(settings['hosts'].values()):
-            url = urlparse(request.url)
-            # If there's more than one possible location, retry to the alternative
-            if settings['mode'] == LocationMode.PRIMARY:
-                settings['mode'] = LocationMode.SECONDARY
-            else:
-                settings['mode'] = LocationMode.PRIMARY
-            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
-            request.url = updated.geturl()
-
-    def configure_retries(self, request):  # pylint: disable=no-self-use
-        body_position = None
-        if hasattr(request.http_request.body, 'read'):
-            try:
-                body_position = request.http_request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-        options = request.context.options
-        return {
-            'total': options.pop("retry_total", self.total_retries),
-            'connect': options.pop("retry_connect", self.connect_retries),
-            'read': options.pop("retry_read", self.read_retries),
-            'status': options.pop("retry_status", self.status_retries),
-            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
-            'mode': options.pop("location_mode", LocationMode.PRIMARY),
-            'hosts': options.pop("hosts", None),
-            'hook': options.pop("retry_hook", None),
-            'body_position': body_position,
-            'count': 0,
-            'history': []
-        }
-
-    def get_backoff_time(self, settings):  # pylint: disable=unused-argument,no-self-use
-        """ Formula for computing the current backoff.
-        Should be overridden by the child class.
-
-        :rtype: float
-        """
-        return 0
-
-    def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        transport.sleep(backoff)
-
-    def increment(self, settings, request, response=None, error=None):
-        """Increment the retry counters.
-
-        :param response: A pipeline response object.
-        :param error: An error encountered during the request, or
-            None if the response was received successfully.
-
-        :return: Whether the retry attempts are exhausted.
-        """
-        settings['total'] -= 1
-
-        if error and isinstance(error, ServiceRequestError):
-            # Errors when we're fairly sure that the server did not receive the
-            # request, so it should be safe to retry.
-            settings['connect'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        elif error and isinstance(error, ServiceResponseError):
-            # Errors that occur after the request has been started, so we should
-            # assume that the server began processing it.
-            settings['read'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # status_forcelist while the given method is in the whitelist
-            if response:
-                settings['status'] -= 1
-                settings['history'].append(RequestHistory(request, http_response=response))
-
-        if not is_exhausted(settings):
-            if request.method not in ['PUT'] and settings['retry_secondary']:
-                self._set_next_host_location(settings, request)
-
-            # rewind the request body if it is a stream
-            if request.body and hasattr(request.body, 'read'):
-                # if no position was saved, the body cannot be rewound and retry will not work
-                if settings['body_position'] is None:
-                    return False
-                try:
-                    # attempt to rewind the body to the initial position
-                    request.body.seek(settings['body_position'], SEEK_SET)
-                except (UnsupportedOperation, ValueError):
-                    # if body is not seekable, then retry would not work
-                    return False
-            settings['count'] += 1
-            return True
-        return False
-
-    def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(StorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(StorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
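
The backoff arithmetic in the two retry classes above is self-contained: ExponentialRetry waits initial_backoff + increment_base**count seconds (count 0 pays only the initial backoff), and both classes then jitter the interval by +/- random_jitter_range with a floor of zero. A minimal standalone sketch of the same formula (defaults copied from the removed classes):

import random

def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
    # First retry waits only initial_backoff; later retries add increment_base**count.
    backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
    # Randomize within +/- jitter seconds, never dropping below zero.
    start = backoff - jitter if backoff > jitter else 0
    return random.Random().uniform(start, backoff + jitter)

for attempt in range(3):
    print(round(exponential_backoff(attempt), 1))  # roughly 15, 18, 24, each +/- 3
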
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/policies_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,219 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import random
-import logging
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline.policies import AsyncHTTPPolicy
-from azure.core.exceptions import AzureError
-
-from .policies import is_retry, StorageRetryPolicy
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-async def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        if asyncio.iscoroutinefunction(settings['hook']):
-            await settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-        else:
-            settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-
-
-class AsyncStorageResponseHook(AsyncHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(AsyncStorageResponseHook, self).__init__()
-
-    async def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = await self.next.send(request)
-        await response.http_response.load_body()
-
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            if asyncio.iscoroutinefunction(response_callback):
-                await response_callback(response)
-            else:
-                response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-class AsyncStorageRetryPolicy(StorageRetryPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    async def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        await transport.sleep(backoff)
-
-    async def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = await self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        await retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        await self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    await retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    await self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(AsyncStorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(AsyncStorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
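
The async module differs from its sync counterpart mainly in how it dispatches user callbacks: a coroutine-function hook is awaited, a plain callable is invoked directly. That dispatch can be exercised on its own; a minimal sketch (helper names ours):

import asyncio

async def invoke_hook(hook, **kwargs):
    # Await coroutine functions, call plain callables directly,
    # mirroring the dispatch in the removed retry_hook.
    if asyncio.iscoroutinefunction(hook):
        await hook(**kwargs)
    else:
        hook(**kwargs)

def sync_hook(retry_count, location_mode):
    print('sync hook:', retry_count, location_mode)

async def async_hook(retry_count, location_mode):
    print('async hook:', retry_count, location_mode)

async def main():
    await invoke_hook(sync_hook, retry_count=0, location_mode='primary')
    await invoke_hook(async_hook, retry_count=1, location_mode='secondary')

asyncio.run(main())
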
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/request_handlers.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/request_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/request_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/request_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,147 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-
-import logging
-from os import fstat
-from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
-
-import isodate
-
-from azure.core.exceptions import raise_with_traceback
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def serialize_iso(attr):
-    """Serialize Datetime object into ISO-8601 formatted string.
-
-    :param Datetime attr: Object to be serialized.
-    :rtype: str
-    :raises: ValueError if format invalid.
-    """
-    if not attr:
-        return None
-    if isinstance(attr, str):
-        attr = isodate.parse_datetime(attr)
-    try:
-        utc = attr.utctimetuple()
-        if utc.tm_year > 9999 or utc.tm_year < 1:
-            raise OverflowError("Hit max or min date")
-
-        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
-            utc.tm_year, utc.tm_mon, utc.tm_mday,
-            utc.tm_hour, utc.tm_min, utc.tm_sec)
-        return date + 'Z'
-    except (ValueError, OverflowError) as err:
-        msg = "Unable to serialize datetime object."
-        raise_with_traceback(ValueError, msg, err)
-    except AttributeError as err:
-        msg = "ISO-8601 object must be valid Datetime object."
-        raise_with_traceback(TypeError, msg, err)
-
-
-def get_length(data):
-    length = None
-    # Check if object implements the __len__ method, covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:  # pylint: disable=bare-except
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            try:
-                return fstat(fileno).st_size
-            except OSError:
-                # Not a valid fileno; requests may have returned
-                # a socket number instead.
-                pass
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
-
-
-def read_length(data):
-    try:
-        if hasattr(data, 'read'):
-            read_data = b''
-            for chunk in iter(lambda: data.read(4096), b""):
-                read_data += chunk
-            return len(read_data), read_data
-        if hasattr(data, '__iter__'):
-            read_data = b''
-            for chunk in data:
-                read_data += chunk
-            return len(read_data), read_data
-    except:  # pylint: disable=bare-except
-        pass
-    raise ValueError("Unable to calculate content length, please specify.")
-
-
-def validate_and_format_range_headers(
-        start_range, end_range, start_range_required=True,
-        end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if (start_range_required or end_range is not None) and start_range is None:
-        raise ValueError("start_range value cannot be None.")
-    if end_range_required and end_range is None:
-        raise ValueError("end_range value cannot be None.")
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError("Invalid page blob start_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(start_range))
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError("Invalid page blob end_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(end_range))
-
-    # Format based on whether end_range is present
-    range_header = None
-    if end_range is not None:
-        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        range_header = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    range_validation = None
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError("Both start and end range requied for MD5 content validation.")
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
-        range_validation = 'true'
-
-    return range_header, range_validation
-
-
-def add_metadata_headers(metadata=None):
-    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
-    headers = {}
-    if metadata:
-        for key, value in metadata.items():
-            headers['x-ms-meta-{}'.format(key)] = value
-    return headers
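
get_length above tries three strategies in order: len(), the file descriptor via fstat, and finally seek/tell. The same cascade can be reproduced standalone; a minimal sketch (function name ours):

import io
from os import fstat

def stream_length(data):
    # 1. Most inputs (bytes, bytearray, lists) implement __len__.
    try:
        return len(data)
    except TypeError:
        pass
    # 2. Real files expose a descriptor whose size fstat can report.
    try:
        return fstat(data.fileno()).st_size
    except (AttributeError, OSError):
        pass
    # 3. Seekable streams: distance from the current position to the end.
    try:
        pos = data.tell()
        data.seek(0, io.SEEK_END)
        length = data.tell() - pos
        data.seek(pos, io.SEEK_SET)
        return length
    except (AttributeError, io.UnsupportedOperation):
        return None

print(stream_length(b'abc'))                 # 3
print(stream_length(io.BytesIO(b'abcdef')))  # 6
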
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/response_handlers.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/response_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/response_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/response_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.exceptions import (
-    HttpResponseError,
-    ResourceNotFoundError,
-    ResourceModifiedError,
-    ResourceExistsError,
-    ClientAuthenticationError,
-    DecodeError)
-
-from .parser import _to_utc_datetime
-from .models import StorageErrorCode, UserDelegationKey, get_enum_value
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.exceptions import AzureError
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-class PartialBatchErrorException(HttpResponseError):
-    """There is a partial failure in batch operations.
-
-    :param str message: The message of the exception.
-    :param response: Server response to be deserialized.
-    :param list parts: A list of the parts in multipart response.
-    """
-
-    def __init__(self, message, response, parts):
-        self.parts = parts
-        super(PartialBatchErrorException, self).__init__(message=message, response=response)
-
-
-def parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers)
-
-
-def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers), deserialized
-
-
-def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return response.location_mode, deserialized
-
-
-def process_storage_error(storage_error):
-    raise_error = HttpResponseError
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        if error_body:
-            for info in error_body.iter():
-                if info.tag.lower() == 'code':
-                    error_code = info.text
-                elif info.tag.lower() == 'message':
-                    error_message = info.text
-                else:
-                    additional_data[info.tag] = info.text
-    except DecodeError:
-        pass
-
-    try:
-        if error_code:
-            error_code = StorageErrorCode(error_code)
-            if error_code in [StorageErrorCode.condition_not_met,
-                              StorageErrorCode.blob_overwritten]:
-                raise_error = ResourceModifiedError
-            if error_code in [StorageErrorCode.invalid_authentication_info,
-                              StorageErrorCode.authentication_failed]:
-                raise_error = ClientAuthenticationError
-            if error_code in [StorageErrorCode.resource_not_found,
-                              StorageErrorCode.cannot_verify_copy_source,
-                              StorageErrorCode.blob_not_found,
-                              StorageErrorCode.queue_not_found,
-                              StorageErrorCode.container_not_found,
-                              StorageErrorCode.parent_not_found,
-                              StorageErrorCode.share_not_found]:
-                raise_error = ResourceNotFoundError
-            if error_code in [StorageErrorCode.account_already_exists,
-                              StorageErrorCode.account_being_created,
-                              StorageErrorCode.resource_already_exists,
-                              StorageErrorCode.resource_type_mismatch,
-                              StorageErrorCode.blob_already_exists,
-                              StorageErrorCode.queue_already_exists,
-                              StorageErrorCode.container_already_exists,
-                              StorageErrorCode.container_being_deleted,
-                              StorageErrorCode.queue_being_deleted,
-                              StorageErrorCode.share_already_exists,
-                              StorageErrorCode.share_being_deleted]:
-                raise_error = ResourceExistsError
-    except ValueError:
-        # Got an unknown error code
-        pass
-
-    try:
-        error_message += "\nErrorCode:{}".format(error_code.value)
-    except AttributeError:
-        error_message += "\nErrorCode:{}".format(error_code)
-    for name, info in additional_data.items():
-        error_message += "\n{}:{}".format(name, info)
-
-    error = raise_error(message=error_message, response=storage_error.response)
-    error.error_code = error_code
-    error.additional_info = additional_data
-    raise error
-
-
-def parse_to_internal_user_delegation_key(service_user_delegation_key):
-    internal_user_delegation_key = UserDelegationKey()
-    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
-    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
-    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
-    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
-    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
-    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
-    internal_user_delegation_key.value = service_user_delegation_key.value
-    return internal_user_delegation_key
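
The Content-Range parsing used here (and by the stream hooks in policies.py) extracts the total size after the slash; it can be checked in isolation:

def parse_length_from_content_range(content_range):
    # 'bytes 1-3/65537' -> take the half after the space, then after the slash.
    if content_range is None:
        return None
    return int(content_range.split(' ', 1)[1].split('/', 1)[1])

assert parse_length_from_content_range('bytes 1-3/65537') == 65537
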
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,209 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from datetime import date
-
-from .parser import _str, _to_utc_datetime
-from .constants import X_MS_VERSION
-from . import sign_string, url_quote
-
-
-class QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-
-    @staticmethod
-    def to_list():
-        return [
-            QueryStringConstants.SIGNED_SIGNATURE,
-            QueryStringConstants.SIGNED_PERMISSION,
-            QueryStringConstants.SIGNED_START,
-            QueryStringConstants.SIGNED_EXPIRY,
-            QueryStringConstants.SIGNED_RESOURCE,
-            QueryStringConstants.SIGNED_IDENTIFIER,
-            QueryStringConstants.SIGNED_IP,
-            QueryStringConstants.SIGNED_PROTOCOL,
-            QueryStringConstants.SIGNED_VERSION,
-            QueryStringConstants.SIGNED_CACHE_CONTROL,
-            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
-            QueryStringConstants.SIGNED_CONTENT_ENCODING,
-            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
-            QueryStringConstants.SIGNED_CONTENT_TYPE,
-            QueryStringConstants.START_PK,
-            QueryStringConstants.START_RK,
-            QueryStringConstants.END_PK,
-            QueryStringConstants.END_RK,
-            QueryStringConstants.SIGNED_RESOURCE_TYPES,
-            QueryStringConstants.SIGNED_SERVICES,
-            QueryStringConstants.SIGNED_OID,
-            QueryStringConstants.SIGNED_TID,
-            QueryStringConstants.SIGNED_KEY_START,
-            QueryStringConstants.SIGNED_KEY_EXPIRY,
-            QueryStringConstants.SIGNED_KEY_SERVICE,
-            QueryStringConstants.SIGNED_KEY_VERSION,
-        ]
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account shared access
-    signature tokens with an account name and account key. Users can either
-    use the factory or construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service
-        or to create a new account object.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account
-            SAS. You can combine values to provide access to more than one
-            resource type.
-        :param AccountSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy. You can combine
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            self.query_dict[name] = _str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(QueryStringConstants.SIGNED_START, start)
-        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, policy_id):
-        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
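-
-# Exposition note (added commentary): the account SAS string-to-sign built in
-# add_account_signature is the newline-terminated sequence
-#
-#     account_name, sp (permission), ss (services), srt (resource types),
-#     st (start), se (expiry), sip (ip), spr (protocol), sv (version)
-#
-# where an unset field contributes an empty line. sign_string() computes an
-# HMAC-SHA256 of this string under the base64-decoded account key; the
-# base64-encoded digest becomes the token's 'sig' query parameter.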
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,548 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from concurrent import futures
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from threading import Lock
-from itertools import islice
-from math import ceil
-
-import six
-
-from azure.core.tracing.common import with_current_context
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
-
-
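-# Exposition note: _parallel_uploads keeps a sliding window of at most
-# max_concurrency in-flight uploads. Each time a future completes it submits
-# the next pending chunk, and it returns the collected range ids.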
-def _parallel_uploads(executor, uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(executor.submit(with_current_context(uploader), next_chunk))
-
-    # Wait for the remaining uploads to finish
-    done, _running = futures.wait(running)
-    range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
-
-def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        validate_content=validate_content,
-        **kwargs)
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_chunk), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
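-# Illustrative call shape for upload_data_chunks (exposition only; the
-# `file_operations` object, sizes, and stream are hypothetical placeholders):
-#
-#   headers = upload_data_chunks(
-#       service=file_operations, uploader_class=FileChunkUploader,
-#       total_size=file_size, chunk_size=4 * 1024 * 1024,
-#       max_concurrency=4, stream=data_stream, validate_content=False)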
-
-def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_substream_block), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b""
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError("Blob data should be of type bytes.")
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b"" or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_lock is not None:
-            with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
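-    # Exposition note: get_substream_blocks slices the source stream into
-    # fixed-size windows without copying; each SubStream shares the wrapped
-    # stream and takes `lock` so its seek+read pair stays atomic when blocks
-    # are uploaded in parallel.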
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{:05}'.format(i), SubStream(self.stream, index, length, lock))
-
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop("modified_access_conditions", None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return index, block_id
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        return not any(bytearray(chunk_data))
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-            self.current_length = int(self.response_headers["blob_append_offset"])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        response = self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
-
-
-class SubStream(IOBase):
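-    """Read-only, seekable view over a window of a wrapped stream.
-
-    Reads are buffered through an internal BytesIO capped at 4MB; when a lock
-    object is supplied (parallel uploads), the seek+read pair on the wrapped
-    stream is performed atomically so multiple SubStream instances can share
-    one parent stream across threads.
-    """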
-
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derivations of io.IOBase and thus do not implement seekable().
-        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # only the main thread runs this, so there's no need to grab the lock
-            wrapped_stream.seek(0, SEEK_CUR)
-        except Exception:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = (
-            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        )
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-        super(SubStream, self).__init__()
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, size=None):
-        if self.closed:  # pylint: disable=using-constant-test
-            raise ValueError("Stream is closed.")
-
-        if size is None:
-            size = self._length - self._position
-
-        # adjust if out of bounds
-        if size + self._position >= self._length:
-            size = self._length - self._position
-
-        # return fast
-        if size == 0 or self._buffer.closed:
-            return b""
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(size)
-        bytes_read = len(read_buffer)
-        bytes_remaining = size - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_concurrency > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence == SEEK_SET:
-            start_index = 0
-        elif whence == SEEK_CUR:
-            start_index = self._position
-        elif whence == SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self, b):
-        raise UnsupportedOperation
-
-    def writelines(self, lines):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
-
-
-class IterStreamer(object):
-    """
-    File-like streaming iterator.
-    """
-
-    def __init__(self, generator, encoding="UTF-8"):
-        self.generator = generator
-        self.iterator = iter(generator)
-        self.leftover = b""
-        self.encoding = encoding
-
-    def __len__(self):
-        return self.generator.__len__()
-
-    def __iter__(self):
-        return self.iterator
-
-    def seekable(self):
-        return False
-
-    def next(self):
-        return next(self.iterator)
-
-    def tell(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator does not support tell.")
-
-    def seek(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator is unseekable.")
-
-    def read(self, size):
-        data = self.leftover
-        count = len(self.leftover)
-        try:
-            while count < size:
-                chunk = self.next()
-                if isinstance(chunk, six.text_type):
-                    chunk = chunk.encode(self.encoding)
-                data += chunk
-                count += len(chunk)
-        except StopIteration:
-            # once the generator is exhausted, clear the leftover so a later
-            # read cannot replay stale bytes
-            self.leftover = b""
-
-        if count > size:
-            self.leftover = data[size:]
-
-        return data[:size]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared/uploads_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,351 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-import asyncio
-from asyncio import Lock
-from itertools import islice
-import threading
-
-from math import ceil
-
-import six
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-
-
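-# Exposition note: this module mirrors uploads.py for coroutines. Pending
-# chunks are throttled with asyncio.wait(..., return_when=FIRST_COMPLETED),
-# scheduling one new coroutine per completed upload so at most
-# max_concurrency uploads are in flight.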
-async def _parallel_uploads(uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for some upload to finish before adding a new one
-        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(asyncio.ensure_future(uploader(next_chunk)))
-
-    # Wait for the remaining uploads to finish
-    if running:
-        done, _running = await asyncio.wait(running)
-        range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
-
-async def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_chunk(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for chunk in uploader.get_chunk_streams():
-            range_ids.append(await uploader.process_chunk(chunk))
-
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-async def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_substream_block(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for block in uploader.get_substream_blocks():
-            range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    async def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    async def _update_progress(self, length):
-        if self.progress_lock is not None:
-            async with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = await self._upload_chunk(chunk_offset, chunk_data)
-        await self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{:05}'.format(i), SubStream(self.stream, index, length, lock))
-
-    async def process_substream_block(self, block_data):
-        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
-        await self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop('modified_access_conditions', None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        await self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options)
-        return index, block_id
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        try:
-            await self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        for each_byte in chunk_data:
-            if each_byte not in [0, b'\x00']:
-                return False
-        return True
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = await self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-            self.current_length = int(self.response_headers['blob_append_offset'])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        response = await self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-        return range_id, response
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,491 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, List, TYPE_CHECKING
-)
-
-from ._shared import sign_string
-from ._shared.constants import X_MS_VERSION
-from ._shared.models import Services
-from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants
-from ._shared.parser import _str
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from . import (
-        ResourceTypes,
-        AccountSasPermissions,
-        ShareSasPermissions,
-        FileSasPermissions
-    )
-
-class FileSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating file and share access
-    signature tokens with a common account name and account key.  Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        '''
-        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_file(self, share_name, directory_name=None, file_name=None,
-                      permission=None, expiry=None, start=None, policy_id=None,
-                      ip=None, protocol=None, cache_control=None,
-                      content_disposition=None, content_encoding=None,
-                      content_language=None, content_type=None):
-        '''
-        Generates a shared access signature for the file.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param str directory_name:
-            Name of directory. SAS tokens cannot be created for directories, so
-            this parameter should only be present if file_name is provided.
-        :param str file_name:
-            Name of file.
-        :param ~azure.storage.fileshare.FileSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str policy_id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The
-            default value is https,http. See :class:`~azure.storage.common.models.Protocol`
-            for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
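-
-        Example (illustrative sketch; placeholder account, key, and paths)::
-
-            from datetime import datetime, timedelta
-
-            signer = FileSharedAccessSignature('myaccount', '<base64-key>')
-            token = signer.generate_file(
-                share_name='share', directory_name='dir', file_name='f.txt',
-                permission='r', expiry=datetime.utcnow() + timedelta(hours=1))
-            url = ('https://myaccount.file.core.windows.net'
-                   '/share/dir/f.txt?' + token)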
-        '''
-        resource_path = share_name
-        if directory_name is not None:
-            resource_path += '/' + _str(directory_name)
-        if file_name is not None:
-            resource_path += '/' + _str(file_name)
-
-        sas = _FileSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(policy_id)
-        sas.add_resource('f')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, resource_path)
-
-        return sas.get_token()
-
-    def generate_share(self, share_name, permission=None, expiry=None,
-                       start=None, policy_id=None, ip=None, protocol=None,
-                       cache_control=None, content_disposition=None,
-                       content_encoding=None, content_language=None,
-                       content_type=None):
-        '''
-        Generates a shared access signature for the share.
-        Use the returned signature with the sas_token parameter of FileService.
-
-        :param str share_name:
-            Name of share.
-        :param ShareSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, create, write, delete, list.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str policy_id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy. To create a stored access policy, use
-            set_file_service_properties.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The
-            default value is https,http. See :class:`~azure.storage.common.models.Protocol`
-            for possible values.
-        :param str cache_control:
-            Response header value for Cache-Control when resource is accessed
-            using this shared access signature.
-        :param str content_disposition:
-            Response header value for Content-Disposition when resource is accessed
-            using this shared access signature.
-        :param str content_encoding:
-            Response header value for Content-Encoding when resource is accessed
-            using this shared access signature.
-        :param str content_language:
-            Response header value for Content-Language when resource is accessed
-            using this shared access signature.
-        :param str content_type:
-            Response header value for Content-Type when resource is accessed
-            using this shared access signature.
-        '''
-        sas = _FileSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(policy_id)
-        sas.add_resource('s')
-        sas.add_override_response_headers(cache_control, content_disposition,
-                                          content_encoding, content_language,
-                                          content_type)
-        sas.add_resource_signature(self.account_name, self.account_key, share_name)
-
-        return sas.get_token()
-
-
-class _FileSharedAccessHelper(_SharedAccessHelper):
-
-    def add_resource_signature(self, account_name, account_key, path):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/file/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
-             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
-             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
-             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
-             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
-
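-# Exposition note (added commentary): unlike an account SAS, the file/share
-# string-to-sign built in add_resource_signature interleaves the
-# canonicalized resource ('/file/<account><path>') with the signed fields in
-# the fixed order above; unset fields contribute empty lines and the single
-# trailing newline is stripped before signing.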
-
-def generate_account_sas(
-        account_name,  # type: str
-        account_key,  # type: str
-        resource_types,  # type: Union[ResourceTypes, str]
-        permission,  # type: Union[AccountSasPermissions, str]
-        expiry,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs  # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for the file service.
-
-    Use the returned signature with the credential parameter of any ShareServiceClient,
-    ShareClient, ShareDirectoryClient, or ShareFileClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param ~azure.storage.fileshare.ResourceTypes resource_types:
-        Specifies the resource types that are accessible with the account SAS.
-    :param ~azure.storage.fileshare.AccountSasPermissions permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required: account SAS tokens cannot reference stored access
-        policies, so the permissions must be specified here.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required. Azure will always convert values to UTC. If a date is
-        passed in without timezone info, it is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made with the SAS. The
-        default value is https.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/file_samples_authentication.py
-            :start-after: [START generate_sas_token]
-            :end-before: [END generate_sas_token]
-            :language: python
-            :dedent: 8
-            :caption: Generate a sas token.
-    """
-    sas = SharedAccessSignature(account_name, account_key)
-    return sas.generate_account(
-        services=Services(fileshare=True),
-        resource_types=resource_types,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs
-    ) # type: ignore
-
-
-def generate_share_sas(
-        account_name,  # type: str
-        share_name,  # type: str
-        account_key,  # type: str
-        permission=None,  # type: Optional[Union[ShareSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        policy_id=None,  # type: Optional[str]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):  # type: (...) -> str
-    """Generates a shared access signature for a share.
-
-    Use the returned signature with the credential parameter of any ShareServiceClient,
-    ShareClient, ShareDirectoryClient, or ShareFileClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str share_name:
-        The name of the share.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param ~azure.storage.fileshare.ShareSasPermissions permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, create, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str policy_id:
-        A unique value up to 64 characters in length that correlates to a
-        stored access policy. To create a stored access policy, use
-        :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`.
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :return: A Shared Access Signature (SAS) token.
-    :rtype: str
-    """
-    sas = FileSharedAccessSignature(account_name, account_key)
-    return sas.generate_share(
-        share_name=share_name,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        policy_id=policy_id,
-        ip=ip,
-        **kwargs
-    )
-
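A matching sketch for generate_share_sas, with a hypothetical share name and placeholder credentials:

from datetime import datetime, timedelta

from azure.storage.fileshare import ShareSasPermissions, generate_share_sas

sas_token = generate_share_sas(
    account_name="<storage-account>",   # placeholder
    share_name="myshare",               # hypothetical share
    account_key="<account-key>",        # placeholder
    permission=ShareSasPermissions(read=True, list=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)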
-
-def generate_file_sas(
-        account_name,  # type: str
-        share_name,  # type: str
-        file_path,  # type: List[str]
-        account_key,  # type: str
-        permission=None,  # type: Optional[Union[FileSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        policy_id=None,  # type: Optional[str]
-        ip=None,  # type: Optional[str]
-        **kwargs # type: Any
-    ):
-    # type: (...) -> str
-    """Generates a shared access signature for a file.
-
-    Use the returned signature with the credential parameter of any ShareServiceClient,
-    ShareClient, ShareDirectoryClient, or ShareFileClient.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str share_name:
-        The name of the share.
-    :param file_path:
-        The file path represented as a list of path segments, including the file name.
-    :type file_path: List[str]
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param ~azure.storage.fileshare.FileSasPermissions permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Permissions must be ordered read, write, delete, list.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str policy_id:
-        A unique value up to 64 characters in length that correlates to a
-        stored access policy.
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str cache_control:
-        Response header value for Cache-Control when resource is accessed
-        using this shared access signature.
-    :keyword str content_disposition:
-        Response header value for Content-Disposition when resource is accessed
-        using this shared access signature.
-    :keyword str content_encoding:
-        Response header value for Content-Encoding when resource is accessed
-        using this shared access signature.
-    :keyword str content_language:
-        Response header value for Content-Language when resource is accessed
-        using this shared access signature.
-    :keyword str content_type:
-        Response header value for Content-Type when resource is accessed
-        using this shared access signature.
-    :keyword str protocol:
-        Specifies the protocol permitted for a request made. The default value is https.
-    :return: A Shared Access Signature (SAS) token.
-    :rtype: str
-    """
-    sas = FileSharedAccessSignature(account_name, account_key)
-    if len(file_path) > 1:
-        dir_path = '/'.join(file_path[:-1])
-    else:
-        dir_path = None # type: ignore
-    return sas.generate_file( # type: ignore
-        share_name=share_name,
-        directory_name=dir_path,
-        file_name=file_path[-1],
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        policy_id=policy_id,
-        ip=ip,
-        **kwargs
-    )
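And for generate_file_sas: note how the body above splits the path-segment list, joining everything before the last element into the directory path and using the last element as the file name. A sketch with hypothetical names and placeholder credentials:

from datetime import datetime, timedelta

from azure.storage.fileshare import FileSasPermissions, generate_file_sas

# ["reports", "2024", "summary.csv"] becomes directory "reports/2024"
# plus file name "summary.csv".
sas_token = generate_file_sas(
    account_name="<storage-account>",   # placeholder
    share_name="myshare",               # hypothetical share
    file_path=["reports", "2024", "summary.csv"],
    account_key="<account-key>",        # placeholder
    permission=FileSasPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)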
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_version.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_version.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/_version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-VERSION = "12.1.1"
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._file_client_async import ShareFileClient
-from ._directory_client_async import ShareDirectoryClient
-from ._share_client_async import ShareClient
-from ._share_service_client_async import ShareServiceClient
-from ._lease_async import ShareLeaseClient
-
-
-__all__ = [
-    'ShareFileClient',
-    'ShareDirectoryClient',
-    'ShareClient',
-    'ShareServiceClient',
-    'ShareLeaseClient',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_directory_client_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_directory_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_directory_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_directory_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,593 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-import time
-from typing import ( # pylint: disable=unused-import
-    Optional, Union, Any, Dict, TYPE_CHECKING
-)
-
-from azure.core.async_paging import AsyncItemPaged
-from azure.core.pipeline import AsyncPipeline
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.tracing.decorator_async import distributed_trace_async
-from .._parser import _get_file_permission, _datetime_to_str
-from .._shared.parser import _str
-
-from .._generated.aio import AzureFileStorage
-from .._generated.version import VERSION
-from .._generated.models import StorageErrorException
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.policies_async import ExponentialRetry
-from .._shared.request_handlers import add_metadata_headers
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._deserialize import deserialize_directory_properties
-from .._serialize import get_api_version
-from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase
-from ._file_client_async import ShareFileClient
-from ._models import DirectoryPropertiesPaged, HandlesPaged
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes
-    from .._generated.models import HandleItem
-
-
-class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase):
-    """A client to interact with a specific directory, although it may not yet exist.
-
-    For operations relating to a specific subdirectory or file in this share, the clients for those
-    entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the directory,
-        use the :func:`from_directory_url` classmethod.
-    :param share_name:
-        The name of the share for the directory.
-    :type share_name: str
-    :param str directory_path:
-        The directory path for the directory with which to interact.
-        If specified, this value will override a directory value specified in the directory URL.
-    :param str snapshot:
-        An optional share snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`ShareClient.create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword loop:
-        The event loop to run the asynchronous tasks.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-    def __init__( # type: ignore
-            self, account_url,  # type: str
-            share_name, # type: str
-            directory_path, # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None, # type: Optional[Any]
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(ShareDirectoryClient, self).__init__(
-            account_url,
-            share_name=share_name,
-            directory_path=directory_path,
-            snapshot=snapshot,
-            credential=credential,
-            loop=loop,
-            **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
-    def get_file_client(self, file_name, **kwargs):
-        # type: (str, Any) -> ShareFileClient
-        """Get a client to interact with a specific file.
-
-        The file need not already exist.
-
-        :param str file_name:
-            The name of the file.
-        :returns: A File Client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        if self.directory_path:
-            file_name = self.directory_path.rstrip('/') + "/" + file_name
-
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareFileClient(
-            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs)
-
-    def get_subdirectory_client(self, directory_name, **kwargs):
-        # type: (str, Any) -> ShareDirectoryClient
-        """Get a client to interact with a specific subdirectory.
-
-        The subdirectory need not already exist.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :returns: A Directory Client.
-        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START get_subdirectory_client]
-                :end-before: [END get_subdirectory_client]
-                :language: python
-                :dedent: 16
-                :caption: Gets the subdirectory client.
-        """
-        directory_path = self.directory_path.rstrip('/') + "/" + directory_name
-
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareDirectoryClient(
-            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs)
-
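Both accessors above only construct new clients over the shared transport; no service call is made. A minimal sketch against the v2019_07_07 aio module this diff removes (account URL, share name, and credential are placeholders):

from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import ShareDirectoryClient

parent = ShareDirectoryClient(
    account_url="https://<account>.file.core.windows.net",  # placeholder
    share_name="myshare",                                   # hypothetical
    directory_path="docs",
    credential="<sas-token>",                               # placeholder
)
reports = parent.get_subdirectory_client("reports")  # points at docs/reports
readme = parent.get_file_client("readme.txt")        # points at docs/readme.txt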
-    @distributed_trace_async
-    async def create_directory(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Creates a new directory under the directory referenced by the client.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the directory as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Directory-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START create_directory]
-                :end-before: [END create_directory]
-                :language: python
-                :dedent: 16
-                :caption: Creates a directory.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return await self._client.directory.create( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def delete_directory(self, **kwargs):
-        # type: (**Any) -> None
-        """Marks the directory for deletion. The directory is
-        later deleted during garbage collection.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START delete_directory]
-                :end-before: [END delete_directory]
-                :language: python
-                :dedent: 16
-                :caption: Deletes a directory.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.directory.delete(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
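A short sketch exercising create_directory and delete_directory; the client is assumed to come from the previous sketch, and the metadata and timeout values are illustrative:

import asyncio


async def recreate(directory: "ShareDirectoryClient") -> None:
    # Create the directory this client points at, then mark it for deletion.
    await directory.create_directory(metadata={"owner": "docs-team"}, timeout=30)
    await directory.delete_directory(timeout=30)

# asyncio.run(recreate(parent))  # e.g. with the client from the sketch above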
-    @distributed_trace
-    def list_directories_and_files(self, name_starts_with=None, **kwargs):
-        # type: (Optional[str], Any) -> AsyncItemPaged
-        """Lists all the directories and files under the directory.
-
-        :param str name_starts_with:
-            Filters the results to return only entities whose names
-            begin with the specified prefix.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START lists_directory]
-                :end-before: [END lists_directory]
-                :language: python
-                :dedent: 16
-                :caption: List directories and files.
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.directory.list_files_and_directories_segment,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=DirectoryPropertiesPaged)
-
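Since the method returns an AsyncItemPaged, consume it with async for; entries are dict-like DirectoryProperties and FileProperties objects, so key access works. A sketch (the prefix is illustrative):

async def list_names(directory: "ShareDirectoryClient") -> list:
    names = []
    # Filter server-side to entries whose names start with the given prefix.
    async for entry in directory.list_directories_and_files(name_starts_with="2024"):
        names.append(entry["name"])
    return names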
-    @distributed_trace
-    def list_handles(self, recursive=False, **kwargs):
-        # type: (bool, Any) -> AsyncItemPaged
-        """Lists opened handles on a directory or a file under the directory.
-
-        :param bool recursive:
-            Boolean that specifies if operation should apply to the directory specified by the client,
-            its files, its subdirectories and their files. Default value is False.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of HandleItem
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem]
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.directory.list_handles,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            recursive=recursive,
-            **kwargs)
-        return AsyncItemPaged(
-            command, results_per_page=results_per_page,
-            page_iterator_class=HandlesPaged)
-
-    @distributed_trace_async
-    async def close_handle(self, handle, **kwargs):
-        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
-        """Close an open file handle.
-
-        :param handle:
-            A specific handle to close.
-        :type handle: str or ~azure.storage.fileshare.HandleItem
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        try:
-            handle_id = handle.id # type: ignore
-        except AttributeError:
-            handle_id = handle
-        if handle_id == '*':
-            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
-        try:
-            response = await self._client.directory.force_close_handles(
-                handle_id,
-                marker=None,
-                recursive=None,
-                sharesnapshot=self.snapshot,
-                cls=return_response_headers,
-                **kwargs
-            )
-            return {
-                'closed_handles_count': response.get('number_of_handles_closed', 0),
-                'failed_handles_count': response.get('number_of_handles_failed', 0)
-            }
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def close_all_handles(self, recursive=False, **kwargs):
-        # type: (bool, Any) -> Dict[str, int]
-        """Close any open file handles.
-
-        This operation will block until the service has closed all open handles.
-
-        :param bool recursive:
-            Boolean that specifies if operation should apply to the directory specified by the client,
-            its files, its subdirectories and their files. Default value is False.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The number of handles closed (this may be 0 if no open handles were found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        timeout = kwargs.pop('timeout', None)
-        start_time = time.time()
-
-        try_close = True
-        continuation_token = None
-        total_closed = 0
-        total_failed = 0
-        while try_close:
-            try:
-                response = await self._client.directory.force_close_handles(
-                    handle_id='*',
-                    timeout=timeout,
-                    marker=continuation_token,
-                    recursive=recursive,
-                    sharesnapshot=self.snapshot,
-                    cls=return_response_headers,
-                    **kwargs
-                )
-            except StorageErrorException as error:
-                process_storage_error(error)
-            continuation_token = response.get('marker')
-            try_close = bool(continuation_token)
-            total_closed += response.get('number_of_handles_closed', 0)
-            total_failed += response.get('number_of_handles_failed', 0)
-            if timeout:
-                timeout = max(0, timeout - (time.time() - start_time))
-        return {
-            'closed_handles_count': total_closed,
-            'failed_handles_count': total_failed
-        }
-
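Note that the loop above already follows the service's continuation marker until no pages of handles remain, so callers need only a single await. A sketch:

async def break_handles(directory: "ShareDirectoryClient") -> None:
    # One call force-closes every open handle under the directory tree.
    result = await directory.close_all_handles(recursive=True, timeout=60)
    print(result["closed_handles_count"], result["failed_handles_count"])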
-    @distributed_trace_async
-    async def get_directory_properties(self, **kwargs):
-        # type: (Any) -> DirectoryProperties
-        """Returns all user-defined metadata and system properties for the
-        specified directory. The data returned does not include the directory's
-        list of files.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: DirectoryProperties
-        :rtype: ~azure.storage.fileshare.DirectoryProperties
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = await self._client.directory.get_properties(
-                timeout=timeout,
-                cls=deserialize_directory_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response # type: ignore
-
-    @distributed_trace_async
-    async def set_directory_metadata(self, metadata, **kwargs):
-        # type: (Dict[str, Any], Any) ->  Dict[str, Any]
-        """Sets the metadata for the directory.
-
-        Each call to this operation replaces all existing metadata
-        attached to the directory. To remove all metadata from the directory,
-        call this operation with an empty metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the directory as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Directory-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        try:
-            return await self._client.directory.set_metadata( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_http_headers(self, file_attributes="none",  # type: Union[str, NTFSAttributes]
-                               file_creation_time="preserve",  # type: Union[str, datetime]
-                               file_last_write_time="preserve",  # type: Union[str, datetime]
-                               file_permission=None,  # type: Optional[str]
-                               permission_key=None,  # type: Optional[str]
-                               **kwargs  # type: Any
-                               ):
-        # type: (...) -> Dict[str, Any]
-        """Sets HTTP headers on the directory.
-
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, indicates preservation of existing values.
-            For example, when given as a str: 'Temporary|Archive'.
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file
-            Default value: Preserve.
-        :type file_creation_time: str or datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Preserve.
-        :type file_last_write_time: str or datetime
-        :param file_permission: If specified, the permission (security
-            descriptor) will be set for the directory/file. This header can be
-            used if the permission size is <= 8KB; otherwise the
-            x-ms-file-permission-key header must be used. Default value: Inherit.
-            If SDDL is specified as input, it must have an owner, group and dacl.
-            Note: only one of x-ms-file-permission or x-ms-file-permission-key
-            should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
-        try:
-            return await self._client.directory.set_properties(  # type: ignore
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_subdirectory(
-            self, directory_name,  # type: str
-            **kwargs
-        ):
-        # type: (...) -> ShareDirectoryClient
-        """Creates a new subdirectory and returns a client to interact
-        with the subdirectory.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the subdirectory as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: ShareDirectoryClient
-        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START create_subdirectory]
-                :end-before: [END create_subdirectory]
-                :language: python
-                :dedent: 16
-                :caption: Create a subdirectory.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        subdir = self.get_subdirectory_client(directory_name)
-        await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
-        return subdir # type: ignore
-
-    @distributed_trace_async
-    async def delete_subdirectory(
-            self, directory_name,  # type: str
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Deletes a subdirectory.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START delete_subdirectory]
-                :end-before: [END delete_subdirectory]
-                :language: python
-                :dedent: 16
-                :caption: Delete a subdirectory.
-        """
-        timeout = kwargs.pop('timeout', None)
-        subdir = self.get_subdirectory_client(directory_name)
-        await subdir.delete_directory(timeout=timeout, **kwargs)
-
-    @distributed_trace_async
-    async def upload_file(
-            self, file_name,  # type: str
-            data, # type: Any
-            length=None, # type: Optional[int]
-            **kwargs # type: Any
-        ):
-        # type: (...) -> ShareFileClient
-        """Creates a new file in the directory and returns a ShareFileClient
-        to interact with the file.
-
-        :param str file_name:
-            The name of the file.
-        :param Any data:
-            Content of the file.
-        :param int length:
-            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire when using http instead of https, as https (the default)
-            already validates. Note that this MD5 hash is not stored with the
-            file.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: ShareFileClient
-        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START upload_file_to_directory]
-                :end-before: [END upload_file_to_directory]
-                :language: python
-                :dedent: 16
-                :caption: Upload a file to a directory.
-        """
-        file_client = self.get_file_client(file_name)
-        await file_client.upload_file(
-            data,
-            length=length,
-            **kwargs)
-        return file_client # type: ignore
-
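A sketch of uploading a small payload through the directory client; the file name and content are illustrative:

async def add_readme(directory: "ShareDirectoryClient"):
    data = b"hello from the file share"  # any bytes-like payload
    # Creates the file, uploads the content, and returns a ShareFileClient.
    return await directory.upload_file("README.txt", data, length=len(data))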
-    @distributed_trace_async
-    async def delete_file(
-            self, file_name,  # type: str
-            **kwargs  # type: Optional[Any]
-        ):
-        # type: (...) -> None
-        """Marks the specified file for deletion. The file is later
-        deleted during garbage collection.
-
-        :param str file_name:
-            The name of the file to delete.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory_async.py
-                :start-after: [START delete_file_in_directory]
-                :end-before: [END delete_file_in_directory]
-                :language: python
-                :dedent: 16
-                :caption: Delete a file in a directory.
-        """
-        file_client = self.get_file_client(file_name)
-        await file_client.delete_file(**kwargs)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_download_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_download_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_download_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_download_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,467 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import sys
-from io import BytesIO
-from itertools import islice
-import warnings
-
-from azure.core.exceptions import HttpResponseError
-from .._shared.encryption import decrypt_blob
-from .._shared.request_handlers import validate_and_format_range_headers
-from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
-from .._download import process_range_and_offset, _ChunkDownloader
-
-
-async def process_content(data, start_offset, end_offset, encryption):
-    if data is None:
-        raise ValueError("Response cannot be None.")
-    try:
-        content = data.response.body()
-    except Exception as error:
-        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
-    if encryption.get('key') is not None or encryption.get('resolver') is not None:
-        try:
-            return decrypt_blob(
-                encryption.get('required'),
-                encryption.get('key'),
-                encryption.get('resolver'),
-                content,
-                start_offset,
-                end_offset,
-                data.response.headers)
-        except Exception as error:
-            raise HttpResponseError(
-                message="Decryption failed.",
-                response=data.response,
-                error=error)
-    return content
-
-
-class _AsyncChunkDownloader(_ChunkDownloader):
-    def __init__(self, **kwargs):
-        super(_AsyncChunkDownloader, self).__init__(**kwargs)
-        self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
-        self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None
-
-    async def process_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
-        length = chunk_end - chunk_start
-        if length > 0:
-            await self._write_to_stream(chunk_data, chunk_start)
-            await self._update_progress(length)
-
-    async def yield_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        return await self._download_chunk(chunk_start, chunk_end - 1)
-
-    async def _update_progress(self, length):
-        if self.progress_lock:
-            async with self.progress_lock:  # pylint: disable=not-async-context-manager
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _write_to_stream(self, chunk_data, chunk_start):
-        if self.stream_lock:
-            async with self.stream_lock:  # pylint: disable=not-async-context-manager
-                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-                self.stream.write(chunk_data)
-        else:
-            self.stream.write(chunk_data)
-
-    async def _download_chunk(self, chunk_start, chunk_end):
-        download_range, offset = process_range_and_offset(
-            chunk_start, chunk_end, chunk_end, self.encryption_options
-        )
-        range_header, range_validation = validate_and_format_range_headers(
-            download_range[0],
-            download_range[1],
-            check_content_md5=self.validate_content
-        )
-        try:
-            _, response = await self.client.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self.validate_content,
-                data_stream_total=self.total_size,
-                download_stream_current=self.progress_total,
-                **self.request_options
-            )
-        except HttpResponseError as error:
-            process_storage_error(error)
-
-        chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
-        return chunk_data
-
-
-class _AsyncChunkIterator(object):
-    """Async iterator for chunks in file download stream."""
-
-    def __init__(self, size, content, downloader):
-        self.size = size
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        raise TypeError("Async stream must be iterated asynchronously.")
-
-    def __aiter__(self):
-        return self
-
-    async def __anext__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopAsyncIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            try:
-                chunk = next(self._iter_chunks)
-            except StopIteration:
-                raise StopAsyncIteration("Download complete")
-            self._current_content = await self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar str path:
-        The full path of the file.
-    :ivar str share:
-        The name of the share where the file is.
-    :ivar ~azure.storage.fileshare.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
-    """
-
-    def __init__(
-            self,
-            client=None,
-            config=None,
-            start_range=None,
-            end_range=None,
-            validate_content=None,
-            encryption_options=None,
-            max_concurrency=1,
-            name=None,
-            path=None,
-            share=None,
-            encoding=None,
-            **kwargs
-    ):
-        self.name = name
-        self.path = path
-        self.share = share
-        self.properties = None
-        self.size = None
-
-        self._client = client
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
-        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
-            else self._config.max_chunk_get_size
-        initial_request_start = self._start_range if self._start_range is not None else 0
-        if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
-            initial_request_end = self._end_range
-        else:
-            initial_request_end = initial_request_start + self._first_get_size - 1
-
-        self._initial_range, self._initial_offset = process_range_and_offset(
-            initial_request_start, initial_request_end, self._end_range, self._encryption_options
-        )
-
-    def __len__(self):
-        return self.size
-
-    async def _setup(self):
-        self._response = await self._initial_request()
-        self.properties = self._response.properties
-        self.properties.name = self.name
-        self.properties.path = self.path
-        self.properties.share = self.share
-
-        # Set the content length to the download size instead of the size of
-        # the last range
-        self.properties.size = self.size
-
-        # Overwrite the content range to the user requested range
-        self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
-            self._start_range,
-            self._end_range,
-            self._file_size
-        )
-
-        # Overwrite the content MD5 as it is the MD5 for the last range instead
-        # of the stored MD5
-        # TODO: Set to the stored MD5 when the service returns this
-        self.properties.content_md5 = None
-
-        if self.size == 0:
-            self._current_content = b""
-        else:
-            self._current_content = await process_content(
-                self._response,
-                self._initial_offset[0],
-                self._initial_offset[1],
-                self._encryption_options
-            )
-
-    async def _initial_request(self):
-        range_header, range_validation = validate_and_format_range_headers(
-            self._initial_range[0],
-            self._initial_range[1],
-            start_range_required=False,
-            end_range_required=False,
-            check_content_md5=self._validate_content)
-
-        try:
-            location_mode, response = await self._client.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self._validate_content,
-                data_stream_total=None,
-                download_stream_current=0,
-                **self._request_options)
-
-            # Check the location we read from to ensure we use the same one
-            # for subsequent requests.
-            self._location_mode = location_mode
-
-            # Parse the total file size and adjust the download size if ranges
-            # were specified
-            self._file_size = parse_length_from_content_range(response.properties.content_range)
-            if self._end_range is not None:
-                # Use the length unless it is over the end of the file
-                self.size = min(self._file_size, self._end_range - self._start_range + 1)
-            elif self._start_range is not None:
-                self.size = self._file_size - self._start_range
-            else:
-                self.size = self._file_size
-
-        except HttpResponseError as error:
-            if self._start_range is None and error.response.status_code == 416:
-                # Get range will fail on an empty file. If the user did not
-                # request a range, do a regular get request in order to get
-                # any properties.
-                try:
-                    _, response = await self._client.download(
-                        validate_content=self._validate_content,
-                        data_stream_total=0,
-                        download_stream_current=0,
-                        **self._request_options)
-                except HttpResponseError as error:
-                    process_storage_error(error)
-
-                # Set the download size to empty
-                self.size = 0
-                self._file_size = 0
-            else:
-                process_storage_error(error)
-
-        # If the file is small, the download is complete at this point.
-        # If file size is large, download the rest of the file in chunks.
-        if response.properties.size == self.size:
-            self._download_complete = True
-        return response
-
-    def chunks(self):
-        """Iterate over chunks in the download stream.
-
-        :rtype: Iterable[bytes]
-        """
-        if self.size == 0 or self._download_complete:
-            iter_downloader = None
-        else:
-            data_end = self._file_size
-            if self._end_range is not None:
-                # Use the length unless it is over the end of the file
-                data_end = min(self._file_size, self._end_range + 1)
-            iter_downloader = _AsyncChunkDownloader(
-                client=self._client,
-                total_size=self.size,
-                chunk_size=self._config.max_chunk_get_size,
-                current_progress=self._first_get_size,
-                start_range=self._initial_range[1] + 1,  # Start where the first download ended
-                end_range=data_end,
-                stream=None,
-                parallel=False,
-                validate_content=self._validate_content,
-                encryption_options=self._encryption_options,
-                use_location=self._location_mode,
-                **self._request_options)
-        return _AsyncChunkIterator(
-            size=self.size,
-            content=self._current_content,
-            downloader=iter_downloader)
-
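A sketch of consuming the download chunk by chunk rather than buffering it, using the async iterator returned by chunks():

async def save_in_chunks(downloader: "StorageStreamDownloader", path: str) -> None:
    # Stream to disk one chunk at a time instead of holding the file in memory.
    with open(path, "wb") as handle:
        async for chunk in downloader.chunks():
            handle.write(chunk)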
-    async def readall(self):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :rtype: bytes or str
-        """
-        stream = BytesIO()
-        await self.readinto(stream)
-        data = stream.getvalue()
-        if self._encoding:
-            return data.decode(self._encoding)
-        return data
-
-    async def content_as_bytes(self, max_concurrency=1):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :param int max_concurrency:
-            The number of parallel connections with which to download.
-        :rtype: bytes
-        """
-        warnings.warn(
-            "content_as_bytes is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        return await self.readall()
-
-    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
-        """Download the contents of this file, and decode as text.
-
-        This operation is blocking until all data is downloaded.
-
-        :param int max_concurrency:
-            The number of parallel connections with which to download.
-        :param str encoding:
-            Text encoding used to decode the downloaded bytes. Default is UTF-8.
-        :rtype: str
-        """
-        warnings.warn(
-            "content_as_text is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        return await self.readall()
-
-    async def readinto(self, stream):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        # the stream must be seekable if parallel download is required
-        parallel = self._max_concurrency > 1
-        if parallel:
-            error_message = "Target stream handle must be seekable."
-            if sys.version_info >= (3,) and not stream.seekable():
-                raise ValueError(error_message)
-
-            try:
-                stream.seek(stream.tell())
-            except (NotImplementedError, AttributeError):
-                raise ValueError(error_message)
-
-        # Write the content to the user stream
-        stream.write(self._current_content)
-        if self._download_complete:
-            return self.size
-
-        data_end = self._file_size
-        if self._end_range is not None:
-            # Use the length unless it is over the end of the file
-            data_end = min(self._file_size, self._end_range + 1)
-
-        downloader = _AsyncChunkDownloader(
-            client=self._client,
-            total_size=self.size,
-            chunk_size=self._config.max_chunk_get_size,
-            current_progress=self._first_get_size,
-            start_range=self._initial_range[1] + 1,  # start where the first download ended
-            end_range=data_end,
-            stream=stream,
-            parallel=parallel,
-            validate_content=self._validate_content,
-            encryption_options=self._encryption_options,
-            use_location=self._location_mode,
-            **self._request_options)
-
-        dl_tasks = downloader.get_chunk_offsets()
-        running_futures = [
-            asyncio.ensure_future(downloader.process_chunk(d))
-            for d in islice(dl_tasks, 0, self._max_concurrency)
-        ]
-        while running_futures:
-            # Wait for some download to finish before adding a new one
-            _done, running_futures = await asyncio.wait(
-                running_futures, return_when=asyncio.FIRST_COMPLETED)
-            try:
-                next_chunk = next(dl_tasks)
-            except StopIteration:
-                break
-            else:
-                running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
-
-        if running_futures:
-            # Wait for the remaining downloads to finish
-            await asyncio.wait(running_futures)
-        return self.size
-
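A sketch pairing readinto with a local file handle; the comment reflects the sliding-window scheduling visible above, where up to max_concurrency chunk downloads run at once:

async def save_all(downloader: "StorageStreamDownloader", path: str) -> int:
    # Writes the initial GET's content first, then fans the remaining ranges
    # out across up to max_concurrency parallel chunk downloads.
    with open(path, "wb") as handle:
        return await downloader.readinto(handle)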
-    async def download_to_stream(self, stream, max_concurrency=1):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The properties of the downloaded file.
-        :rtype: ~azure.storage.fileshare.FileProperties
-        """
-        warnings.warn(
-            "download_to_stream is deprecated, use readinto instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        await self.readinto(stream)
-        return self.properties
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_file_client_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_file_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_file_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_file_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,1165 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-many-lines
-import functools
-import time
-from io import BytesIO
-from typing import Optional, Union, IO, List, Dict, Any, Iterable, TYPE_CHECKING  # pylint: disable=unused-import
-
-import six
-from azure.core.async_paging import AsyncItemPaged
-
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.tracing.decorator_async import distributed_trace_async
-from .._parser import _datetime_to_str, _get_file_permission
-from .._shared.parser import _str
-
-from .._generated.aio import AzureFileStorage
-from .._generated.version import VERSION
-from .._generated.models import StorageErrorException, FileHTTPHeaders
-from .._shared.policies_async import ExponentialRetry
-from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from .._shared.request_handlers import add_metadata_headers, get_length
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._deserialize import deserialize_file_properties, deserialize_file_stream
-from .._serialize import get_access_conditions, get_smb_properties, get_api_version
-from .._file_client import ShareFileClient as ShareFileClientBase
-from ._models import HandlesPaged
-from ._lease_async import ShareLeaseClient
-from ._download_async import StorageStreamDownloader
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes
-    from .._generated.models import HandleItem
-
-
-async def _upload_file_helper(
-    client,
-    stream,
-    size,
-    metadata,
-    content_settings,
-    validate_content,
-    timeout,
-    max_concurrency,
-    file_settings,
-    file_attributes="none",
-    file_creation_time="now",
-    file_last_write_time="now",
-    file_permission=None,
-    file_permission_key=None,
-    **kwargs
-):
-    try:
-        if size is None or size < 0:
-            raise ValueError("A content size must be specified for a File.")
-        response = await client.create_file(
-            size, content_settings=content_settings, metadata=metadata,
-            file_attributes=file_attributes,
-            file_creation_time=file_creation_time,
-            file_last_write_time=file_last_write_time,
-            file_permission=file_permission,
-            permission_key=file_permission_key,
-            timeout=timeout,
-            **kwargs
-        )
-        if size == 0:
-            return response
-
-        responses = await upload_data_chunks(
-            service=client,
-            uploader_class=FileChunkUploader,
-            total_size=size,
-            chunk_size=file_settings.max_range_size,
-            stream=stream,
-            max_concurrency=max_concurrency,
-            validate_content=validate_content,
-            timeout=timeout,
-            **kwargs
-        )
-        return sorted(responses, key=lambda r: r.get('last_modified'))[-1]
-    except StorageErrorException as error:
-        process_storage_error(error)
-
-
-class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase):
-    """A client to interact with a specific file, although that file may not yet exist.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the
-        file, use the :func:`from_file_url` classmethod.
-    :param share_name:
-        The name of the share for the file.
-    :type share_name: str
-    :param str file_path:
-        The file path to the file with which to interact. If specified, this value will override
-        a file value specified in the file URL.
-    :param str snapshot:
-        An optional file snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`ShareClient.create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword loop:
-        The event loop to run the asynchronous tasks.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-
-    def __init__(  # type: ignore
-        self,
-        account_url,  # type: str
-        share_name,  # type: str
-        file_path,  # type: str
-        snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(ShareFileClient, self).__init__(
-            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot,
-            credential=credential, loop=loop, **kwargs
-        )
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = loop
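-
-    # Minimal construction sketch (editorial example; the account, share,
-    # path, and credential values below are placeholders, not real values):
-    #
-    #     file_client = ShareFileClient(
-    #         account_url="https://<account>.file.core.windows.net",
-    #         share_name="myshare",
-    #         file_path="dir/file.txt",
-    #         credential="<sas-token-or-account-key>",
-    #     )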
-
-    @distributed_trace_async
-    async def acquire_lease(self, lease_id=None, **kwargs):
-        # type: (Optional[str], **Any) -> ShareLeaseClient
-        """Requests a new lease.
-
-        If the file does not have an active lease, the File
-        Service creates a lease on the file and returns a new lease.
-
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The File Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A ShareLeaseClient object.
-        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
-
-        """
-        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
-        await lease.acquire(**kwargs)
-        return lease
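-
-    # Usage sketch, assuming an authenticated ShareFileClient named
-    # `file_client` (hypothetical name); the returned lease can guard
-    # subsequent operations and is released when done:
-    #
-    #     lease = await file_client.acquire_lease()
-    #     await file_client.set_file_metadata({"locked": "true"}, lease=lease)
-    #     await lease.release()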
-
-    @distributed_trace_async
-    async def create_file(  # type: ignore
-        self,
-        size,  # type: int
-        file_attributes="none",  # type: Union[str, NTFSAttributes]
-        file_creation_time="now",  # type: Union[str, datetime]
-        file_last_write_time="now",  # type: Union[str, datetime]
-        file_permission=None,  # type: Optional[str]
-        permission_key=None,  # type: Optional[str]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a new file.
-
-        Note that it only initializes the file with no content.
-
-        :param int size: Specifies the maximum size for the file,
-            up to 1 TiB.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, the default value is "None" and the attributes will be set to "Archive".
-            Example string value: 'Temporary|Archive'. The value is not case sensitive.
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file
-            Default value: Now.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Now.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client_async.py
-                :start-after: [START create_file]
-                :end-before: [END create_file]
-                :language: python
-                :dedent: 16
-                :caption: Create a file.
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        content_settings = kwargs.pop('content_settings', None)
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption and not self.key_encryption_key:
-            raise ValueError("Encryption required but no key was provided.")
-
-        headers = kwargs.pop("headers", {})
-        headers.update(add_metadata_headers(metadata))
-        file_http_headers = None
-        if content_settings:
-            file_http_headers = FileHTTPHeaders(
-                file_cache_control=content_settings.cache_control,
-                file_content_type=content_settings.content_type,
-                file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-                file_content_encoding=content_settings.content_encoding,
-                file_content_language=content_settings.content_language,
-                file_content_disposition=content_settings.content_disposition,
-            )
-        file_permission = _get_file_permission(file_permission, permission_key, 'Inherit')
-        try:
-            return await self._client.file.create(  # type: ignore
-                file_content_length=size,
-                metadata=metadata,
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                file_http_headers=file_http_headers,
-                lease_access_conditions=access_conditions,
-                headers=headers,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
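-
-    # Usage sketch (assumes `file_client` as above): create an empty,
-    # zero-initialized 1 KiB file that ranges can later be written into:
-    #
-    #     await file_client.create_file(size=1024)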
-
-    @distributed_trace_async
-    async def upload_file(
-        self, data,  # type: Any
-        length=None,  # type: Optional[int]
-        file_attributes="none",  # type: Union[str, NTFSAttributes]
-        file_creation_time="now",  # type: Union[str, datetime]
-        file_last_write_time="now",  # type: Union[str, datetime]
-        file_permission=None,  # type: Optional[str]
-        permission_key=None,  # type: Optional[str]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> Dict[str, Any]
-        """Uploads a new file.
-
-        :param Any data:
-            Content of the file.
-        :param int length:
-            Length of the file in bytes, up to a maximum size of 1 TiB.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, the default value is "None" and the attributes will be set to "Archive".
-            Example string value: 'Temporary|Archive'. The value is not case sensitive.
-        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
-        :param file_creation_time: Creation time for the file
-            Default value: Now.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Now.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the file as metadata.
-        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each range of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client_async.py
-                :start-after: [START upload_file]
-                :end-before: [END upload_file]
-                :language: python
-                :dedent: 16
-                :caption: Upload a file.
-        """
-        metadata = kwargs.pop('metadata', None)
-        content_settings = kwargs.pop('content_settings', None)
-        max_concurrency = kwargs.pop('max_concurrency', 1)
-        validate_content = kwargs.pop('validate_content', False)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding)
-        if length is None:
-            length = get_length(data)
-        if isinstance(data, bytes):
-            data = data[:length]
-
-        if isinstance(data, bytes):
-            stream = BytesIO(data)
-        elif hasattr(data, "read"):
-            stream = data
-        elif hasattr(data, "__iter__"):
-            stream = IterStreamer(data, encoding=encoding)  # type: ignore
-        else:
-            raise TypeError("Unsupported data type: {}".format(type(data)))
-        return await _upload_file_helper(  # type: ignore
-            self,
-            stream,
-            length,
-            metadata,
-            content_settings,
-            validate_content,
-            timeout,
-            max_concurrency,
-            self._config,
-            file_attributes=file_attributes,
-            file_creation_time=file_creation_time,
-            file_last_write_time=file_last_write_time,
-            file_permission=file_permission,
-            file_permission_key=permission_key,
-            **kwargs
-        )
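-
-    # Usage sketch (assumes `file_client` as above): bytes, text, streams,
-    # and iterables are all accepted as `data`:
-    #
-    #     await file_client.upload_file(b"hello world")
-    #
-    #     with open("local.bin", "rb") as source:  # local path is a placeholder
-    #         await file_client.upload_file(source, max_concurrency=4)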
-
-    @distributed_trace_async
-    async def start_copy_from_url(self, source_url, **kwargs):
-        # type: (str, Any) -> Any
-        """Initiates the copying of data from a source URL into the file
-        referenced by the client.
-
-        The status of this copy operation can be found using the `get_properties`
-        method.
-
-        :param str source_url:
-            Specifies the URL of the source file.
-        :keyword str file_permission:
-            If specified the permission (security descriptor) shall be set for the directory/file.
-            This value can be set to "source" to copy the security descriptor from the source file.
-            Otherwise if set, this value will be used to override the source value. If not set, permission value
-            is inherited from the parent directory of the target file. This setting can be
-            used if Permission size is <= 8KB, otherwise permission_key shall be used.
-            If SDDL is specified as input, it must have owner, group and dacl.
-            Note: Only one of the file_permission or permission_key should be specified.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword str permission_key:
-            Key of the permission to be set for the directory/file.
-            This value can be set to "source" to copy the security descriptor from the source file.
-            Otherwise if set, this value will be used to override the source value. If not set, permission value
-            is inherited from the parent directory of the target file.
-            Note: Only one of the file_permission or permission_key should be specified.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword file_attributes:
-            This value can be set to "source" to copy file attributes from the source file to the target file,
-            or to "None" to clear all attributes. Otherwise it can be set to a list of attributes
-            to set on the target file. If this is not set, the default value is "Archive".
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :keyword file_creation_time:
-            This value can be set to "source" to copy the creation time from the source file to the target file,
-            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
-            If this is not set, the creation time will be the time at which the copy engine
-            created (or overwrote) the target file.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_creation_time: str or ~datetime.datetime
-        :keyword file_last_write_time:
-            This value can be set to "source" to copy the last write time from the source file to the target file, or
-            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
-            If this is not set, the value will be the time at which the copy engine last wrote to the target file.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :paramtype file_last_write_time: str or ~datetime.datetime
-        :keyword bool ignore_read_only:
-            Specifies the option to overwrite the target file if it already exists and has read-only attribute set.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword bool set_archive_attribute:
-            Specifies the option to set the archive attribute on the target file.
-            True means the archive attribute will be set on the target file despite attribute
-            overrides or the source file state.
-
-            .. versionadded:: 12.1.0
-                This parameter was introduced in API version '2019-07-07'.
-
-        :keyword metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A dict of copy properties (etag, last modified, copy_id, copy_status).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client_async.py
-                :start-after: [START copy_file_from_url]
-                :end-before: [END copy_file_from_url]
-                :language: python
-                :dedent: 16
-                :caption: Copy a file from a URL
-        """
-        metadata = kwargs.pop('metadata', None)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop("headers", {})
-        headers.update(add_metadata_headers(metadata))
-        kwargs.update(get_smb_properties(kwargs))
-        try:
-            return await self._client.file.start_copy(
-                source_url,
-                metadata=metadata,
-                lease_access_conditions=access_conditions,
-                headers=headers,
-                cls=return_response_headers,
-                timeout=timeout,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
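-
-    # Usage sketch (assumes `file_client` as above; the source URL is a
-    # placeholder and must be readable by the service, e.g. public or
-    # carrying a SAS token):
-    #
-    #     await file_client.start_copy_from_url(
-    #         "https://<account>.file.core.windows.net/share/src.txt?<sas>")
-    #     props = await file_client.get_file_properties()  # poll copy status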
-
-    @distributed_trace_async
-    async def abort_copy(self, copy_id, **kwargs):
-        # type: (Union[str, FileProperties], Any) -> None
-        """Abort an ongoing copy operation.
-
-        This will leave a destination file with zero length and full metadata.
-        This will raise an error if the copy operation has already ended.
-
-        :param copy_id:
-            The copy operation to abort. This can be either an ID, or an
-            instance of FileProperties.
-        :type copy_id: str or ~azure.storage.fileshare.FileProperties
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            copy_id = copy_id.copy.id
-        except AttributeError:
-            try:
-                copy_id = copy_id["copy_id"]
-            except TypeError:
-                pass
-        try:
-            await self._client.file.abort_copy(copy_id=copy_id,
-                                               lease_access_conditions=access_conditions,
-                                               timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def download_file(
-        self,
-        offset=None,  # type: Optional[int]
-        length=None,  # type: Optional[int]
-        **kwargs
-    ):
-        # type: (...) -> StorageStreamDownloader
-        """Downloads a file to a stream with automatic chunking.
-
-        :param int offset:
-            Start of byte range to use for downloading a section of the file.
-            Must be set if length is provided.
-        :param int length:
-            Number of bytes to read from the stream. This is optional, but
-            should be supplied for optimal performance.
-        :keyword int max_concurrency:
-            Maximum number of parallel connections to use.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash for each chunk of the file. The storage
-            service checks the hash of the content that has arrived with the hash
-            that was sent. This is primarily valuable for detecting bitflips on
-            the wire if using http instead of https as https (the default) will
-            already validate. Note that this MD5 hash is not stored with the
-            file. Also note that if enabled, the memory-efficient upload algorithm
-            will not be used, because computing the MD5 hash requires buffering
-            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A streaming object (StorageStreamDownloader)
-        :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client_async.py
-                :start-after: [START download_file]
-                :end-before: [END download_file]
-                :language: python
-                :dedent: 16
-                :caption: Download a file.
-        """
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-        if length is not None and offset is None:
-            raise ValueError("Offset value must not be None if length is set.")
-
-        range_end = None
-        if length is not None:
-            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
-
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        downloader = StorageStreamDownloader(
-            client=self._client.file,
-            config=self._config,
-            start_range=offset,
-            end_range=range_end,
-            encryption_options=None,
-            name=self.file_name,
-            path='/'.join(self.file_path),
-            share=self.share_name,
-            lease_access_conditions=access_conditions,
-            cls=deserialize_file_stream,
-            **kwargs
-        )
-        await downloader._setup()  # pylint: disable=protected-access
-        return downloader
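-
-    # Usage sketch (assumes `file_client` as above and `io` imported):
-    # the returned downloader is written into a seekable stream:
-    #
-    #     buffer = io.BytesIO()
-    #     downloader = await file_client.download_file()
-    #     await downloader.readinto(buffer)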
-
-    @distributed_trace_async
-    async def delete_file(self, **kwargs):
-        # type: (Any) -> None
-        """Marks the specified file for deletion. The file is
-        later deleted during garbage collection.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_client_async.py
-                :start-after: [START delete_file]
-                :end-before: [END delete_file]
-                :language: python
-                :dedent: 16
-                :caption: Delete a file.
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_file_properties(self, **kwargs):
-        # type: (Any) -> FileProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the file.
-
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: FileProperties
-        :rtype: ~azure.storage.fileshare.FileProperties
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            file_props = await self._client.file.get_properties(
-                sharesnapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                cls=deserialize_file_properties,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-        file_props.name = self.file_name
-        file_props.share = self.share_name
-        file_props.snapshot = self.snapshot
-        file_props.path = "/".join(self.file_path)
-        return file_props  # type: ignore
-
-    @distributed_trace_async
-    async def set_http_headers(self, content_settings,  # type: ContentSettings
-                               file_attributes="preserve",  # type: Union[str, NTFSAttributes]
-                               file_creation_time="preserve",  # type: Union[str, datetime]
-                               file_last_write_time="preserve",  # type: Union[str, datetime]
-                               file_permission=None,  # type: Optional[str]
-                               permission_key=None,  # type: Optional[str]
-                               **kwargs  # type: Any
-                               ):
-        # type: (...) -> Dict[str, Any]
-        """Sets HTTP headers on the file.
-
-        :param ~azure.storage.fileshare.ContentSettings content_settings:
-            ContentSettings object used to set file properties. Used to set content type, encoding,
-            language, disposition, md5, and cache control.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, existing values are preserved.
-            Example string value: 'Temporary|Archive'.
-        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
-        :param file_creation_time: Creation time for the file
-            Default value: Preserve.
-        :type file_creation_time: str or ~datetime.datetime
-        :param file_last_write_time: Last write time for the file
-            Default value: Preserve.
-        :type file_last_write_time: str or ~datetime.datetime
-        :param file_permission: If specified the permission (security
-            descriptor) shall be set for the directory/file. This header can be
-            used if Permission size is <= 8KB, else x-ms-file-permission-key
-            header shall be used. Default value: Inherit. If SDDL is specified as
-            input, it must have owner, group and dacl. Note: Only one of the
-            x-ms-file-permission or x-ms-file-permission-key should be specified.
-        :type file_permission: str
-        :param permission_key: Key of the permission to be set for the
-            directory/file. Note: Only one of the x-ms-file-permission or
-            x-ms-file-permission-key should be specified.
-        :type permission_key: str
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        file_content_length = kwargs.pop("size", None)
-        file_http_headers = FileHTTPHeaders(
-            file_cache_control=content_settings.cache_control,
-            file_content_type=content_settings.content_type,
-            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
-            file_content_encoding=content_settings.content_encoding,
-            file_content_language=content_settings.content_language,
-            file_content_disposition=content_settings.content_disposition,
-        )
-        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
-        try:
-            return await self._client.file.set_http_headers(  # type: ignore
-                file_content_length=file_content_length,
-                file_http_headers=file_http_headers,
-                file_attributes=_str(file_attributes),
-                file_creation_time=_datetime_to_str(file_creation_time),
-                file_last_write_time=_datetime_to_str(file_last_write_time),
-                file_permission=file_permission,
-                file_permission_key=permission_key,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_file_metadata(self, metadata=None, **kwargs):  # type: ignore
-        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
-        """Sets user-defined metadata for the specified file as one or more
-        name-value pairs.
-
-        Each call to this operation replaces all existing metadata
-        attached to the file. To remove all metadata from the file,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the file as metadata.
-        :type metadata: dict(str, str)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop("headers", {})
-        headers.update(add_metadata_headers(metadata))  # type: ignore
-        try:
-            return await self._client.file.set_metadata(  # type: ignore
-                metadata=metadata, lease_access_conditions=access_conditions,
-                timeout=timeout, cls=return_response_headers, headers=headers, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
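-
-    # Usage sketch (assumes `file_client` as above); note that a call with
-    # no metadata clears any existing metadata:
-    #
-    #     await file_client.set_file_metadata({"category": "test"})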
-
-    @distributed_trace_async
-    async def upload_range(  # type: ignore
-        self,
-        data,  # type: bytes
-        offset,  # type: int
-        length,  # type: int
-        **kwargs
-    ):
-        # type: (...) -> Dict[str, Any]
-        """Upload a range of bytes to a file.
-
-        :param bytes data:
-            The data to upload.
-        :param int offset:
-            Start of byte range to use for uploading a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for uploading a section of the file.
-            The range can be up to 4 MB in size.
-        :keyword bool validate_content:
-            If true, calculates an MD5 hash of the page content. The storage
-            service checks the hash of the content that has arrived
-            with the hash that was sent. This is primarily valuable for detecting
-            bitflips on the wire if using http instead of https as https (the default)
-            will already validate. Note that this MD5 hash is not stored with the
-            file.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :keyword str encoding:
-            Defaults to UTF-8.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        validate_content = kwargs.pop('validate_content', False)
-        timeout = kwargs.pop('timeout', None)
-        encoding = kwargs.pop('encoding', 'UTF-8')
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Encryption not supported.")
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding)
-        end_range = offset + length - 1  # Reformat to an inclusive range index
-        content_range = 'bytes={0}-{1}'.format(offset, end_range)
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        try:
-            return await self._client.file.upload_range(  # type: ignore
-                range=content_range,
-                content_length=length,
-                optionalbody=data,
-                timeout=timeout,
-                validate_content=validate_content,
-                lease_access_conditions=access_conditions,
-                cls=return_response_headers,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
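-
-    # Usage sketch (assumes `file_client` as above and an already-created
-    # file large enough to hold the range):
-    #
-    #     data = b"abcd"
-    #     await file_client.upload_range(data, offset=0, length=len(data))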
-
-    @distributed_trace_async
-    async def upload_range_from_url(self, source_url,
-                                    offset,
-                                    length,
-                                    source_offset,
-                                    **kwargs
-                                    ):
-        # type: (str, int, int, int, **Any) -> Dict[str, Any]
-        """
-        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
-
-        :param str source_url:
-            A URL of up to 2 KB in length that specifies an Azure file or blob.
-            The value should be URL-encoded as it would appear in a request URI.
-            If the source is in another account, the source must either be public
-            or must be authenticated via a shared access signature. If the source
-            is public, no authentication is required.
-            Examples:
-            https://myaccount.file.core.windows.net/myshare/mydir/myfile
-            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
-        :param int offset:
-            Start of byte range to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for updating a section of the file.
-            The range can be up to 4 MB in size.
-        :param int source_offset:
-            The start (inclusive) of the range of bytes to read from the copy source.
-            The service reads the same number of bytes as the destination range (``length`` bytes).
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        """
-        options = self._upload_range_from_url_options(
-            source_url=source_url,
-            offset=offset,
-            length=length,
-            source_offset=source_offset,
-            **kwargs
-        )
-        try:
-            return await self._client.file.upload_range_from_url(**options)  # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_ranges(  # type: ignore
-        self,
-        offset=None,  # type: Optional[int]
-        length=None,  # type: Optional[int]
-        **kwargs
-    ):
-        # type: (...) -> List[Dict[str, int]]
-        """Returns the list of valid ranges of a file.
-
-        :param int offset:
-            Specifies the start offset of the byte range over which to get ranges.
-        :param int length:
-            Number of bytes in the range over which to get ranges.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A list of valid ranges.
-        :rtype: List[dict[str, int]]
-        """
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Unsupported method for encryption.")
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-
-        content_range = None
-        if offset is not None:
-            if length is not None:
-                end_range = offset + length - 1  # Reformat to an inclusive range index
-                content_range = "bytes={0}-{1}".format(offset, end_range)
-            else:
-                content_range = "bytes={0}-".format(offset)
-        try:
-            ranges = await self._client.file.get_range_list(
-                range=content_range,
-                sharesnapshot=self.snapshot,
-                lease_access_conditions=access_conditions,
-                timeout=timeout,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return [{"start": b.start, "end": b.end} for b in ranges]
-
-    @distributed_trace_async
-    async def clear_range(  # type: ignore
-        self,
-        offset,  # type: int
-        length,  # type: int
-        **kwargs
-    ):
-        # type: (...) -> Dict[str, Any]
-        """Clears the specified range and releases the space used in storage for
-        that range.
-
-        :param int offset:
-            Start of byte range to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-        :param int length:
-            Number of bytes to use for clearing a section of the file.
-            The range can be up to 4 MB in size.
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError("Unsupported method for encryption.")
-
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer that is a multiple of 512")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer that is a multiple of 512")
-        end_range = length + offset - 1  # Reformat to an inclusive range index
-        content_range = "bytes={0}-{1}".format(offset, end_range)
-        try:
-            return await self._client.file.upload_range(  # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                content_length=0,
-                file_range_write="clear",
-                range=content_range,
-                lease_access_conditions=access_conditions,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def resize_file(self, size, **kwargs):
-        # type: (int, Any) -> Dict[str, Any]
-        """Resizes a file to the specified size.
-
-        :param int size:
-            Size to resize file to (in bytes)
-        :keyword lease:
-            Required if the file has an active lease. Value can be a ShareLeaseClient object
-            or the lease ID as a string.
-
-            .. versionadded:: 12.1.0
-
-        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: File-updated property dict (Etag and last modified).
-        :rtype: Dict[str, Any]
-        """
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return await self._client.file.set_http_headers(  # type: ignore
-                file_content_length=size,
-                file_attributes="preserve",
-                file_creation_time="preserve",
-                file_last_write_time="preserve",
-                file_permission="preserve",
-                lease_access_conditions=access_conditions,
-                cls=return_response_headers,
-                timeout=timeout,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
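-
-    # Usage sketch (assumes `file_client` as above); shrinking truncates the
-    # file, while growing extends it with zero-filled ranges:
-    #
-    #     await file_client.resize_file(2048)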
-
-    @distributed_trace
-    def list_handles(self, **kwargs):
-        # type: (Any) -> AsyncItemPaged
-        """Lists handles for file.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of HandleItem
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem]
-        """
-        timeout = kwargs.pop('timeout', None)
-        results_per_page = kwargs.pop("results_per_page", None)
-        command = functools.partial(
-            self._client.file.list_handles,
-            sharesnapshot=self.snapshot,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command, results_per_page=results_per_page,
-            page_iterator_class=HandlesPaged)
-
-    @distributed_trace_async
-    async def close_handle(self, handle, **kwargs):
-        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
-        """Close an open file handle.
-
-        :param handle:
-            A specific handle to close.
-        :type handle: str or ~azure.storage.fileshare.Handle
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            The number of handles closed (this may be 0 if the specified handle was not found)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        try:
-            handle_id = handle.id # type: ignore
-        except AttributeError:
-            handle_id = handle
-        if handle_id == '*':
-            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
-        try:
-            response = await self._client.file.force_close_handles(
-                handle_id,
-                marker=None,
-                sharesnapshot=self.snapshot,
-                cls=return_response_headers,
-                **kwargs
-            )
-            return {
-                'closed_handles_count': response.get('number_of_handles_closed', 0),
-                'failed_handles_count': response.get('number_of_handles_failed', 0)
-            }
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def close_all_handles(self, **kwargs):
-        # type: (Any) -> Dict[str, int]
-        """Close any open file handles.
-
-        This operation will block until the service has closed all open handles.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            The number of handles closed (this may be 0 if there were no open handles)
-            and the number of handles that failed to close, in a dict.
-        :rtype: dict[str, int]
-        """
-        timeout = kwargs.pop('timeout', None)
-        start_time = time.time()
-
-        try_close = True
-        continuation_token = None
-        total_closed = 0
-        total_failed = 0
-        while try_close:
-            try:
-                response = await self._client.file.force_close_handles(
-                    handle_id='*',
-                    timeout=timeout,
-                    marker=continuation_token,
-                    sharesnapshot=self.snapshot,
-                    cls=return_response_headers,
-                    **kwargs
-                )
-            except StorageErrorException as error:
-                process_storage_error(error)
-            continuation_token = response.get('marker')
-            try_close = bool(continuation_token)
-            total_closed += response.get('number_of_handles_closed', 0)
-            total_failed += response.get('number_of_handles_failed', 0)
-            if timeout:
-                timeout = max(0, timeout - (time.time() - start_time))
-        return {
-            'closed_handles_count': total_closed,
-            'failed_handles_count': total_failed
-        }
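-
-    # Usage sketch (assumes `file_client` as above); the returned counts use
-    # the keys built in the loop above:
-    #
-    #     counts = await file_client.close_all_handles()
-    #     print(counts["closed_handles_count"], counts["failed_handles_count"])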
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_lease_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_lease_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_lease_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_lease_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,166 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TypeVar, TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from .._shared.response_handlers import return_response_headers, process_storage_error
-from .._generated.models import (
-    StorageErrorException)
-from .._lease import ShareLeaseClient as LeaseClientBase
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    ShareFileClient = TypeVar("ShareFileClient")
-
-
-class ShareLeaseClient(LeaseClientBase):
-    """Creates a new ShareLeaseClient.
-
-    This client provides lease operations on a ShareFileClient.
-
-    :ivar str id:
-        The ID of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired.
-    :ivar str etag:
-        The ETag of the lease currently being maintained. This will be `None` if no
-        lease has yet been acquired or modified.
-    :ivar ~datetime.datetime last_modified:
-        The last modified timestamp of the lease currently being maintained.
-        This will be `None` if no lease has yet been acquired or modified.
-
-    :param client:
-        The client of the file to lease.
-    :type client: ~azure.storage.fileshare.aio.ShareFileClient
-    :param str lease_id:
-        A string representing the lease ID of an existing lease. This value does not
-        need to be specified in order to acquire a new lease, or break one.
-    """
-
-    def __enter__(self):
-        raise TypeError("Async lease must use 'async with'.")
-
-    def __exit__(self, *args):
-        self.release()
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(self, *args):
-        await self.release()
-
-    @distributed_trace_async
-    async def acquire(self, **kwargs):
-        # type: (Any) -> None
-        """Requests a new lease. This operation establishes and manages a lock on a
-        file for write and delete operations. If the file does not have an active lease,
-        the File service creates a lease on the file and returns a new lease ID. If the
-        file has an active lease, you can only request a new lease using the active
-        lease ID.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        try:
-            response = await self._client.acquire_lease(
-                timeout=kwargs.pop('timeout', None),
-                duration=-1,
-                proposed_lease_id=self.id,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-        self.etag = response.get('etag')  # type: str
-
-    @distributed_trace_async
-    async def release(self, **kwargs):
-        # type: (Any) -> None
-        """Releases the lease. The lease may be released if the lease ID specified on the request matches
-        that associated with the file. Releasing the lease allows another client to immediately acquire the lease
-        for the file as soon as the release is complete.
-
-
-        The lease may be released if the client lease id specified matches
-        that associated with the file. Releasing the lease allows another client
-        to immediately acquire the lease for the file as soon as the release is complete.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        try:
-            response = await self._client.release_lease(
-                lease_id=self.id,
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace_async
-    async def change(self, proposed_lease_id, **kwargs):
-        # type: (str, Any) -> None
-        """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
-        a new lease ID in x-ms-proposed-lease-id.
-
-
-        :param str proposed_lease_id:
-            Proposed lease ID, in a GUID string format. The File service returns 400
-            (Invalid request) if the proposed lease ID is not in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-        """
-        try:
-            response = await self._client.change_lease(
-                lease_id=self.id,
-                proposed_lease_id=proposed_lease_id,
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        self.etag = response.get('etag')  # type: str
-        self.id = response.get('lease_id')  # type: str
-        self.last_modified = response.get('last_modified')   # type: datetime
-
-    @distributed_trace_async
-    async def break_lease(self, **kwargs):
-        # type: (Any) -> int
-        """Force breaks the lease if the file has an active lease. Any authorized request can break the lease;
-        the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
-
-        Once a lease is broken, it cannot be changed.
-        When a lease is successfully broken, the response indicates the interval
-        in seconds until a new lease can be acquired.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: Approximate time remaining in the lease period, in seconds.
-        :rtype: int
-        """
-        try:
-            response = await self._client.break_lease(
-                timeout=kwargs.pop('timeout', None),
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return response.get('lease_time') # type: ignore
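The four methods removed above form the standard file-lease lifecycle: acquire an infinite lease (duration=-1), optionally change its ID, then release or break it. A minimal async usage sketch, assuming the vendored aio module exports ShareFileClient and ShareLeaseClient as the upstream azure-storage-file-share package does; connection string and paths are placeholders:

    import asyncio
    from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import (
        ShareFileClient,
        ShareLeaseClient,  # assumed export mirroring azure-storage-file-share
    )

    async def main():
        file_client = ShareFileClient.from_connection_string(
            "<connection-string>", share_name="myshare", file_path="dir/log.txt")
        lease = ShareLeaseClient(file_client)   # starts with a proposed lease ID
        await lease.acquire()                   # infinite lease (duration=-1)
        try:
            pass  # writes that must present lease.id go here
        finally:
            await lease.release()               # other clients may acquire immediately

    asyncio.run(main())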
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_models.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,178 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-
-from azure.core.async_paging import AsyncPageIterator
-
-from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
-from .._generated.models import StorageErrorException
-from .._generated.models import DirectoryItem
-from .._models import Handle, ShareProperties
-
-
-def _wrap_item(item):
-    if isinstance(item, DirectoryItem):
-        return {'name': item.name, 'is_directory': True}
-    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
-
-
-class SharePropertiesPaged(AsyncPageIterator):
-    """An iterable of Share properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.fileshare.ShareProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only shares whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of share names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(SharePropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
-        return self._response.next_marker or None, self.current_page
-
-
-class HandlesPaged(AsyncPageIterator):
-    """An iterable of Handles.
-
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.fileshare.Handle)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param int results_per_page: The maximum number of handles to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, results_per_page=None, continuation_token=None):
-        super(HandlesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
-        return self._response.next_marker or None, self.current_page
-
-
-class DirectoryPropertiesPaged(AsyncPageIterator):
-    """An iterable for the contents of a directory.
-
-    This iterable will yield dicts for the contents of the directory. The dicts
-    will have the keys 'name' (str) and 'is_directory' (bool).
-    Items that are files (is_directory=False) will have an additional 'size' key.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(dict(str, Any))
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only directories whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of items to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(DirectoryPropertiesPaged, self).__init__(
-            get_next=self._get_next_cb,
-            extract_data=self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-        self.current_page = []
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                marker=continuation_token or None,
-                prefix=self.prefix,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items]
-        self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items])
-        return self._response.next_marker or None, self.current_page
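These AsyncPageIterator subclasses are never iterated directly; the clients wrap them in an azure.core AsyncItemPaged (see list_shares further down in this patch, which passes page_iterator_class=SharePropertiesPaged). A consumption sketch, with service_client and saved_token as placeholders:

    # Flat iteration; continuation tokens are followed transparently.
    async for share in service_client.list_shares(include_metadata=True):
        print(share.name)

    # Page-by-page iteration, resumable from a stored continuation token.
    pages = service_client.list_shares().by_page(continuation_token=saved_token)
    async for page in pages:
        async for share in page:
            print(share.name)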
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_client_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,563 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import ( # pylint: disable=unused-import
-    Optional, Union, Dict, Any, Iterable, TYPE_CHECKING
-)
-
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.pipeline import AsyncPipeline
-from .._shared.policies_async import ExponentialRetry
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.request_handlers import add_metadata_headers, serialize_iso
-from .._shared.response_handlers import (
-    return_response_headers,
-    process_storage_error,
-    return_headers_and_deserialized)
-from .._generated.aio import AzureFileStorage
-from .._generated.version import VERSION
-from .._generated.models import (
-    StorageErrorException,
-    SignedIdentifier,
-    DeleteSnapshotsOptionType)
-from .._deserialize import deserialize_share_properties, deserialize_permission
-from .._serialize import get_api_version
-from .._share_client import ShareClient as ShareClientBase
-from ._directory_client_async import ShareDirectoryClient
-from ._file_client_async import ShareFileClient
-
-if TYPE_CHECKING:
-    from .._models import ShareProperties, AccessPolicy
-
-
-class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase):
-    """A client to interact with a specific share, although that share may not yet exist.
-
-    For operations relating to a specific directory or file in this share, the clients for
-    those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
-
-    :param str account_url:
-        The URI to the storage account. In order to create a client given the full URI to the share,
-        use the :func:`from_share_url` classmethod.
-    :param share_name:
-        The name of the share with which to interact.
-    :type share_name: str
-    :param str snapshot:
-        An optional share snapshot on which to operate. This can be the snapshot ID string
-        or the response returned from :func:`create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword loop:
-        The event loop to run the asynchronous tasks.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-    def __init__( # type: ignore
-            self, account_url,  # type: str
-            share_name,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(ShareClient, self).__init__(
-            account_url,
-            share_name=share_name,
-            snapshot=snapshot,
-            credential=credential,
-            loop=loop,
-            **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
-    def get_directory_client(self, directory_path=None):
-        # type: (Optional[str]) -> ShareDirectoryClient
-        """Get a client to interact with the specified directory.
-        The directory need not already exist.
-
-        :param str directory_path:
-            Path to the specified directory.
-        :returns: A Directory Client.
-        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
-        """
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return ShareDirectoryClient(
-            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop)
-
-    def get_file_client(self, file_path):
-        # type: (str) -> ShareFileClient
-        """Get a client to interact with the specified file.
-        The file need not already exist.
-
-        :param str file_path:
-            Path to the specified file.
-        :returns: A File Client.
-        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
-        """
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return ShareFileClient(
-            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
-            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop)
-
-    @distributed_trace_async
-    async def create_share(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Creates a new Share under the account. If a share with the
-        same name already exists, the operation fails.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the share as metadata.
-        :keyword int quota:
-            The quota to be allotted, in gigabytes.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START create_share]
-                :end-before: [END create_share]
-                :language: python
-                :dedent: 12
-                :caption: Creates a file share.
-        """
-        metadata = kwargs.pop('metadata', None)
-        quota = kwargs.pop('quota', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-
-        try:
-            return await self._client.share.create( # type: ignore
-                timeout=timeout,
-                metadata=metadata,
-                quota=quota,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_snapshot( # type: ignore
-            self,
-            **kwargs # type: Optional[Any]
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a snapshot of the share.
-
-        A snapshot is a read-only version of a share that's taken at a point in time.
-        It can be read, copied, or deleted, but not modified. Snapshots provide a way
-        to back up a share as it appears at a moment in time.
-
-        A snapshot of a share has the same name as the base share from which the snapshot
-        is taken, with a DateTime value appended to indicate the time at which the
-        snapshot was taken.
-
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the share as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
-        :rtype: dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START create_share_snapshot]
-                :end-before: [END create_share_snapshot]
-                :language: python
-                :dedent: 16
-                :caption: Creates a snapshot of the file share.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return await self._client.share.create_snapshot( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def delete_share(
-            self, delete_snapshots=False, # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified share for deletion. The share is
-        later deleted during garbage collection.
-
-        :param bool delete_snapshots:
-            Indicates if snapshots are to be deleted.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START delete_share]
-                :end-before: [END delete_share]
-                :language: python
-                :dedent: 16
-                :caption: Deletes the share and any snapshots.
-        """
-        timeout = kwargs.pop('timeout', None)
-        delete_include = None
-        if delete_snapshots:
-            delete_include = DeleteSnapshotsOptionType.include
-        try:
-            await self._client.share.delete(
-                timeout=timeout,
-                sharesnapshot=self.snapshot,
-                delete_snapshots=delete_include,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_share_properties(self, **kwargs):
-        # type: (Any) -> ShareProperties
-        """Returns all user-defined metadata and system properties for the
-        specified share. The data returned does not include the share's
-        list of files or directories.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: The share properties.
-        :rtype: ~azure.storage.fileshare.ShareProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_hello_world_async.py
-                :start-after: [START get_share_properties]
-                :end-before: [END get_share_properties]
-                :language: python
-                :dedent: 16
-                :caption: Gets the share properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            props = await self._client.share.get_properties(
-                timeout=timeout,
-                sharesnapshot=self.snapshot,
-                cls=deserialize_share_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        props.name = self.share_name
-        props.snapshot = self.snapshot
-        return props # type: ignore
-
-    @distributed_trace_async
-    async def set_share_quota(self, quota, **kwargs):
-        # type: (int, Any) ->  Dict[str, Any]
-        """Sets the quota for the share.
-
-        :param int quota:
-            Specifies the maximum size of the share, in gigabytes.
-            Must be greater than 0, and less than or equal to 5 TB.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START set_share_quota]
-                :end-before: [END set_share_quota]
-                :language: python
-                :dedent: 16
-                :caption: Sets the share quota.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return await self._client.share.set_quota( # type: ignore
-                timeout=timeout,
-                quota=quota,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_share_metadata(self, metadata, **kwargs):
-        # type: (Dict[str, Any], Any) ->  Dict[str, Any]
-        """Sets the metadata for the share.
-
-        Each call to this operation replaces all existing metadata
-        attached to the share. To remove all metadata from the share,
-        call this operation with no metadata dict.
-
-        :param metadata:
-            Name-value pairs associated with the share as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START set_share_metadata]
-                :end-before: [END set_share_metadata]
-                :language: python
-                :dedent: 16
-                :caption: Sets the share metadata.
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata))
-        try:
-            return await self._client.share.set_metadata( # type: ignore
-                timeout=timeout,
-                cls=return_response_headers,
-                headers=headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_share_access_policy(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the permissions for the share. The permissions
-        indicate whether files in a share may be accessed publicly.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Access policy information in a dict.
-        :rtype: dict[str, Any]
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response, identifiers = await self._client.share.get_access_policy(
-                timeout=timeout,
-                cls=return_headers_and_deserialized,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {
-            'public_access': response.get('share_public_access'),
-            'signed_identifiers': identifiers or []
-        }
-
-    @distributed_trace_async
-    async def set_share_access_policy(self, signed_identifiers, **kwargs):
-        # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str]
-        """Sets the permissions for the share, or stored access
-        policies that may be used with Shared Access Signatures. The permissions
-        indicate whether files in a share may be accessed publicly.
-
-        :param signed_identifiers:
-            A dictionary of access policies to associate with the share. The
-            dictionary may contain up to 5 elements. An empty dictionary
-            will clear the access policies set on the service.
-        :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: Share-updated property dict (Etag and last modified).
-        :rtype: dict(str, Any)
-        """
-        timeout = kwargs.pop('timeout', None)
-        if len(signed_identifiers) > 5:
-            raise ValueError(
-                'Too many access policies provided. The server does not support setting '
-                'more than 5 access policies on a single resource.')
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value))
-        signed_identifiers = identifiers # type: ignore
-
-        try:
-            return await self._client.share.set_access_policy( # type: ignore
-                share_acl=signed_identifiers or None,
-                timeout=timeout,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_share_stats(self, **kwargs):
-        # type: (Any) -> int
-        """Gets the approximate size of the data stored on the share in bytes.
-
-        Note that this value may not include all recently created
-        or recently re-sized files.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The approximate size of the data (in bytes) stored on the share.
-        :rtype: int
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = await self._client.share.get_statistics(
-                timeout=timeout,
-                **kwargs)
-            return stats.share_usage_bytes # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_directories_and_files( # type: ignore
-            self, directory_name=None,  # type: Optional[str]
-            name_starts_with=None,  # type: Optional[str]
-            marker=None,  # type: Optional[str]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> Iterable[Dict[str,str]]
-        """Lists the directories and files under the share.
-
-        :param str directory_name:
-            Name of a directory.
-        :param str name_starts_with:
-            Filters the results to return only directories whose names
-            begin with the specified prefix.
-        :param str marker:
-            An opaque continuation token. This value can be retrieved from the
-            next_marker field of a previous generator object. If specified,
-            this generator will begin returning results from this point.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_share_async.py
-                :start-after: [START share_list_files_in_dir]
-                :end-before: [END share_list_files_in_dir]
-                :language: python
-                :dedent: 16
-                :caption: List directories and files in the share.
-        """
-        timeout = kwargs.pop('timeout', None)
-        directory = self.get_directory_client(directory_name)
-        return directory.list_directories_and_files(
-            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
-
-    @distributed_trace_async
-    async def create_permission_for_share(self, file_permission,  # type: str
-                                          **kwargs  # type: Any
-                                          ):
-        # type: (...) -> str
-        """Create a permission (a security descriptor) at the share level.
-
-        This 'permission' can be used for the files/directories in the share.
-        If a matching 'permission' already exists, its key is returned; otherwise
-        a new permission is created at the share level and its key is returned.
-
-        :param str file_permission:
-            File permission, in portable SDDL format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A file permission key
-        :rtype: str
-        """
-        timeout = kwargs.pop('timeout', None)
-        options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
-        try:
-            return await self._client.share.create_permission(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_permission_for_share(  # type: ignore
-            self, permission_key,  # type: str
-            **kwargs  # type: Any
-    ):
-        # type: (...) -> str
-        """Get a permission (a security descriptor) for a given key.
-
-        This 'permission' can be used for the files/directories in the share.
-
-        :param str permission_key:
-            Key of the file permission to retrieve
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A file permission (a portable SDDL)
-        :rtype: str
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            return await self._client.share.get_permission(  # type: ignore
-                file_permission_key=permission_key,
-                cls=deserialize_permission,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def create_directory(self, directory_name, **kwargs):
-        # type: (str, Any) -> ShareDirectoryClient
-        """Creates a directory in the share and returns a client to interact
-        with the directory.
-
-        :param str directory_name:
-            The name of the directory.
-        :keyword dict(str,str) metadata:
-            Name-value pairs associated with the directory as metadata.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: ShareDirectoryClient
-        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
-        """
-        directory = self.get_directory_client(directory_name)
-        kwargs.setdefault('merge_span', True)
-        await directory.create_directory(**kwargs)
-        return directory # type: ignore
-
-    @distributed_trace_async
-    async def delete_directory(self, directory_name, **kwargs):
-        # type: (str, Any) -> None
-        """Marks the directory for deletion. The directory is
-        later deleted during garbage collection.
-
-        :param str directory_name:
-            The name of the directory.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        directory = self.get_directory_client(directory_name)
-        await directory.delete_directory(**kwargs)
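For reference, a compact end-to-end sketch of the async ShareClient deleted above, assuming from_connection_string is inherited from the sync ShareClientBase; all names are placeholders:

    import asyncio
    from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import ShareClient

    async def main():
        share = ShareClient.from_connection_string(
            "<connection-string>", share_name="myshare")
        await share.create_share(quota=1)                  # quota in gigabytes
        snapshot = await share.create_snapshot()           # dict with 'snapshot', 'etag', ...
        directory = await share.create_directory("logs")   # ShareDirectoryClient
        await share.delete_share(delete_snapshots=True)

    asyncio.run(main())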
diff -pruN 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_service_client_async.py 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_service_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_service_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/fileshare/v2019_07_07/aio/_share_service_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,327 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List,
-    TYPE_CHECKING
-)
-
-from azure.core.async_paging import AsyncItemPaged
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import AsyncPipeline
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.response_handlers import process_storage_error
-from .._shared.policies_async import ExponentialRetry
-from .._generated.aio import AzureFileStorage
-from .._generated.models import StorageErrorException, StorageServiceProperties
-from .._generated.version import VERSION
-from .._share_service_client import ShareServiceClient as ShareServiceClientBase
-from .._serialize import get_api_version
-from ._share_client_async import ShareClient
-from ._models import SharePropertiesPaged
-from .._models import service_properties_deserialize
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from .._shared.models import ResourceTypes, AccountSasPermissions
-    from .._models import (
-        ShareProperties,
-        Metrics,
-        CorsRule,
-    )
-
-
-class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase):
-    """A client to interact with the File Share Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete shares within the account.
-    For operations relating to a specific share, a client for that entity
-    can also be retrieved using the :func:`get_share_client` function.
-
-    :param str account_url:
-        The URL to the file share storage account. Any other entities included
-        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword loop:
-        The event loop to run the asynchronous tasks.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/file_samples_authentication_async.py
-            :start-after: [START create_share_service_client]
-            :end-before: [END create_share_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Create the share service client with url and credential.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(ShareServiceClient, self).__init__(
-            account_url,
-            credential=credential,
-            loop=loop,
-            **kwargs)
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
-    @distributed_trace_async
-    async def get_service_properties(self, **kwargs):
-        # type: (Any) -> Dict[str, Any]
-        """Gets the properties of a storage account's File Share service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A dictionary containing file service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START get_service_properties]
-                :end-before: [END get_service_properties]
-                :language: python
-                :dedent: 12
-                :caption: Get file share service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_service_properties(
-            self, hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's File Share service, including
-        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for files.
-        :type hour_metrics: ~azure.storage.fileshare.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for files.
-        :type minute_metrics: ~azure.storage.fileshare.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list(:class:`~azure.storage.fileshare.CorsRule`)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START set_service_properties]
-                :end-before: [END set_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Sets file share service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        props = StorageServiceProperties(
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors
-        )
-        try:
-            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_shares(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            include_snapshots=False, # type: Optional[bool]
-            **kwargs  # type: Any
-        ):  # type: (...) -> AsyncItemPaged
-        """Returns auto-paging iterable of dict-like ShareProperties under the specified account.
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all shares have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only shares whose names
-            begin with the specified name_starts_with.
-        :param bool include_metadata:
-            Specifies that share metadata be returned in the response.
-        :param bool include_snapshots:
-            Specifies that share snapshots be returned in the response.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An iterable (auto-paging) of ShareProperties.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START fsc_list_shares]
-                :end-before: [END fsc_list_shares]
-                :language: python
-                :dedent: 16
-                :caption: List shares in the file share service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        include = []
-        if include_metadata:
-            include.append('metadata')
-        if include_snapshots:
-            include.append('snapshots')
-        results_per_page = kwargs.pop('results_per_page', None)
-        command = functools.partial(
-            self._client.service.list_shares_segment,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=SharePropertiesPaged)
-
-    @distributed_trace_async
-    async def create_share(
-            self, share_name,  # type: str
-            **kwargs
-        ):
-        # type: (...) -> ShareClient
-        """Creates a new share under the specified account. If the share
-        with the same name already exists, the operation fails. Returns a client with
-        which to interact with the newly created share.
-
-        :param str share_name: The name of the share to create.
-        :keyword dict(str,str) metadata:
-            A dict with name-value pairs to associate with the
-            share as metadata. Example: {'Category': 'test'}
-        :keyword int quota:
-            Quota in gigabytes.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.fileshare.aio.ShareClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START fsc_create_shares]
-                :end-before: [END fsc_create_shares]
-                :language: python
-                :dedent: 12
-                :caption: Create a share in the file share service.
-        """
-        metadata = kwargs.pop('metadata', None)
-        quota = kwargs.pop('quota', None)
-        timeout = kwargs.pop('timeout', None)
-        share = self.get_share_client(share_name)
-        kwargs.setdefault('merge_span', True)
-        await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
-        return share
-
-    @distributed_trace_async
-    async def delete_share(
-            self, share_name,  # type: Union[ShareProperties, str]
-            delete_snapshots=False, # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Marks the specified share for deletion. The share is
-        later deleted during garbage collection.
-
-        :param share_name:
-            The share to delete. This can either be the name of the share,
-            or an instance of ShareProperties.
-        :type share_name: str or ~azure.storage.fileshare.ShareProperties
-        :param bool delete_snapshots:
-            Indicates if snapshots are to be deleted.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START fsc_delete_shares]
-                :end-before: [END fsc_delete_shares]
-                :language: python
-                :dedent: 16
-                :caption: Delete a share in the file share service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        share = self.get_share_client(share_name)
-        kwargs.setdefault('merge_span', True)
-        await share.delete_share(
-            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
-
-    def get_share_client(self, share, snapshot=None):
-        # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient
-        """Get a client to interact with the specified share.
-        The share need not already exist.
-
-        :param share:
-            The share. This can either be the name of the share,
-            or an instance of ShareProperties.
-        :type share: str or ~azure.storage.fileshare.ShareProperties
-        :param str snapshot:
-            An optional share snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`create_snapshot`.
-        :returns: A ShareClient.
-        :rtype: ~azure.storage.fileshare.aio.ShareClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_service_async.py
-                :start-after: [START get_share_client]
-                :end-before: [END get_share_client]
-                :language: python
-                :dedent: 8
-                :caption: Gets the share client.
-        """
-        try:
-            share_name = share.name
-        except AttributeError:
-            share_name = share
-
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-        return ShareClient(
-            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
-            api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop)
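And the matching account-level sketch for the ShareServiceClient deleted above (connection string and share name are placeholders):

    import asyncio
    from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import ShareServiceClient

    async def main():
        service = ShareServiceClient.from_connection_string("<connection-string>")
        share = await service.create_share("myshare")      # returns a ShareClient
        async for props in service.list_shares(include_metadata=True):
            print(props.name, props.metadata)
        await service.delete_share("myshare")

    asyncio.run(main())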
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/__init__.py	2025-06-18 05:27:42.000000000 +0000
@@ -1 +1 @@
-﻿__import__('pkg_resources').declare_namespace(__name__)
+﻿
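This hunk drops the deprecated pkg_resources namespace declaration and leaves an (effectively) empty module, so the subpackage no longer needs setuptools at runtime. A sketch of the before/after, grounded in the hunk above:

    # Removed (deprecated setuptools-based namespace declaration):
    __import__('pkg_resources').declare_namespace(__name__)

    # Replacement: an empty __init__.py; ordinary package semantics apply
    # and no runtime setuptools dependency is needed.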
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._version import VERSION
-from ._queue_client import QueueClient
-from ._queue_service_client import QueueServiceClient
-from ._shared_access_signature import generate_account_sas, generate_queue_sas
-from ._shared.policies import ExponentialRetry, LinearRetry
-from ._shared.models import(
-    LocationMode,
-    ResourceTypes,
-    AccountSasPermissions,
-    StorageErrorCode
-)
-from ._message_encoding import (
-    TextBase64EncodePolicy,
-    TextBase64DecodePolicy,
-    BinaryBase64EncodePolicy,
-    BinaryBase64DecodePolicy,
-)
-from ._models import (
-    QueueMessage,
-    QueueProperties,
-    QueueSasPermissions,
-    AccessPolicy,
-    QueueAnalyticsLogging,
-    Metrics,
-    CorsRule,
-    RetentionPolicy,
-)
-
-__version__ = VERSION
-
-__all__ = [
-    'QueueClient',
-    'QueueServiceClient',
-    'ExponentialRetry',
-    'LinearRetry',
-    'LocationMode',
-    'ResourceTypes',
-    'AccountSasPermissions',
-    'StorageErrorCode',
-    'QueueMessage',
-    'QueueProperties',
-    'QueueSasPermissions',
-    'AccessPolicy',
-    'TextBase64EncodePolicy',
-    'TextBase64DecodePolicy',
-    'BinaryBase64EncodePolicy',
-    'BinaryBase64DecodePolicy',
-    'QueueAnalyticsLogging',
-    'Metrics',
-    'CorsRule',
-    'RetentionPolicy',
-    'generate_account_sas',
-    'generate_queue_sas'
-]
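The public surface above mirrors azure-storage-queue 12.x. A minimal sketch using the Base64 message-encoding policies it exports; the connection string and queue name are placeholders, and the client methods are assumed to match the upstream 12.x QueueClient:

    from azure.multiapi.storagev2.queue.v2019_07_07 import (
        QueueClient, TextBase64EncodePolicy, TextBase64DecodePolicy)

    queue = QueueClient.from_connection_string(
        "<connection-string>", queue_name="tasks",
        message_encode_policy=TextBase64EncodePolicy(),
        message_decode_policy=TextBase64DecodePolicy())
    queue.create_queue()
    queue.send_message("hello")   # stored Base64-encoded, decoded on receive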
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,41 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=unused-argument
-
-from azure.core.exceptions import ResourceExistsError
-
-from ._shared.models import StorageErrorCode
-from ._models import QueueProperties
-
-
-def deserialize_metadata(response, obj, headers):
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def deserialize_queue_properties(response, obj, headers):
-    metadata = deserialize_metadata(response, obj, headers)
-    queue_properties = QueueProperties(
-        metadata=metadata,
-        **headers
-    )
-    return queue_properties
-
-
-def deserialize_queue_creation(response, obj, headers):
-    if response.status_code == 204:
-        error_code = StorageErrorCode.queue_already_exists
-        error = ResourceExistsError(
-            message="Queue already exists\nRequestId:{}\nTime:{}\nErrorCode:{}".format(
-                headers['x-ms-request-id'],
-                headers['Date'],
-                error_code
-            ),
-            response=response)
-        error.error_code = error_code
-        error.additional_info = {}
-        raise error
-    return headers
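deserialize_queue_creation turns the service's 204 response for an already-existing queue into a ResourceExistsError carrying the request ID and error code. Callers therefore handle idempotent creation as in this sketch (queue is a placeholder client):

    from azure.core.exceptions import ResourceExistsError

    try:
        queue.create_queue()
    except ResourceExistsError:
        # Queue already existed with the same metadata; the service answered
        # 204 and the deserializer above converted that into this exception.
        pass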
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_queue_storage import AzureQueueStorage
-__all__ = ['AzureQueueStorage']
-
-from .version import VERSION
-
-__version__ = VERSION
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import PipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration import AzureQueueStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations import ServiceOperations
-from .operations import QueueOperations
-from .operations import MessagesOperations
-from .operations import MessageIdOperations
-from . import models
-
-
-class AzureQueueStorage(object):
-    """AzureQueueStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.queue.operations.ServiceOperations
-    :ivar queue: Queue operations
-    :vartype queue: azure.storage.queue.operations.QueueOperations
-    :ivar messages: Messages operations
-    :vartype messages: azure.storage.queue.operations.MessagesOperations
-    :ivar message_id: MessageId operations
-    :vartype message_id: azure.storage.queue.operations.MessageIdOperations
-
-    :param url: The URL of the service account, queue or message that is the
-     target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureQueueStorageConfiguration(url, **kwargs)
-        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-03-28'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.queue = QueueOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.messages = MessagesOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.message_id = MessageIdOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    def close(self):
-        self._client.close()
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-    def __exit__(self, *exc_details):
-        self._client.__exit__(*exc_details)
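
The deleted sync client above is a thin shell: it binds four operation groups to one azure.core PipelineClient, pins api_version to 2018-03-28, and supports the context-manager protocol. A minimal usage sketch against the 1.4.0-1 import path shown in this diff (the account URL is a placeholder and credential/auth policies are omitted)::

    # Sketch only: drives the removed v2019_07_07 generated sync client.
    from azure.multiapi.storagev2.queue.v2019_07_07._generated import AzureQueueStorage

    # 'url' is the full URL of the account, queue or message that the chosen
    # operation targets; the '{url}' base_url template is filled from it.
    with AzureQueueStorage("https://myaccount.queue.core.windows.net/myqueue") as client:
        # Each operation group shares the client's pipeline, serializer and config.
        peeked = client.messages.peek(number_of_messages=5)
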
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from .version import VERSION
-
-
-class AzureQueueStorageConfiguration(Configuration):
-    """Configuration for AzureQueueStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, queue or message that is the
-     target of the desired operation.
-    :type url: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :vartype version: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureQueueStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azurequeuestorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-
-        self.url = url
-        self.version = "2018-03-28"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
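
Every policy this configuration builds in _configure() can be replaced by passing it as a keyword argument, since kwargs.get(...) wins over the default it would otherwise construct. A sketch of that override hook (retry settings are illustrative; the endpoint is a placeholder)::

    # Sketch only: swap the default RetryPolicy for a custom one.
    from azure.core.pipeline import policies
    from azure.multiapi.storagev2.queue.v2019_07_07._generated._configuration import (
        AzureQueueStorageConfiguration,
    )

    config = AzureQueueStorageConfiguration(
        "https://myaccount.queue.core.windows.net",
        retry_policy=policies.RetryPolicy(retry_total=3, retry_backoff_factor=0.5),
    )
    assert config.version == "2018-03-28"  # service version is hard-pinned
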
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._azure_queue_storage_async import AzureQueueStorage
-__all__ = ['AzureQueueStorage']
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,69 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core import AsyncPipelineClient
-from msrest import Serializer, Deserializer
-
-from ._configuration_async import AzureQueueStorageConfiguration
-from azure.core.exceptions import map_error
-from .operations_async import ServiceOperations
-from .operations_async import QueueOperations
-from .operations_async import MessagesOperations
-from .operations_async import MessageIdOperations
-from .. import models
-
-
-class AzureQueueStorage(object):
-    """AzureQueueStorage
-
-
-    :ivar service: Service operations
-    :vartype service: azure.storage.queue.aio.operations_async.ServiceOperations
-    :ivar queue: Queue operations
-    :vartype queue: azure.storage.queue.aio.operations_async.QueueOperations
-    :ivar messages: Messages operations
-    :vartype messages: azure.storage.queue.aio.operations_async.MessagesOperations
-    :ivar message_id: MessageId operations
-    :vartype message_id: azure.storage.queue.aio.operations_async.MessageIdOperations
-
-    :param url: The URL of the service account, queue or message that is the
-     target of the desired operation.
-    :type url: str
-    """
-
-    def __init__(
-            self, url, **kwargs):
-
-        base_url = '{url}'
-        self._config = AzureQueueStorageConfiguration(url, **kwargs)
-        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
-
-        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-03-28'
-        self._serialize = Serializer(client_models)
-        self._deserialize = Deserializer(client_models)
-
-        self.service = ServiceOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.queue = QueueOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.messages = MessagesOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-        self.message_id = MessageIdOperations(
-            self._client, self._config, self._serialize, self._deserialize)
-
-    async def close(self):
-        await self._client.close()
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-    async def __aexit__(self, *exc_details):
-        await self._client.__aexit__(*exc_details)
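
The async client mirrors the sync one, swapping PipelineClient for AsyncPipelineClient and the context-manager methods for their awaitable counterparts. A minimal sketch (placeholder endpoint, no credentials)::

    # Sketch only: the async client is driven through "async with" so the
    # AsyncPipelineClient transport is opened and closed cleanly.
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def peek_front(url):
        async with AzureQueueStorage(url) as client:
            # Same operation groups as the sync client, but awaitable.
            return await client.messages.peek(number_of_messages=1)

    # e.g. asyncio.run(peek_front("https://myaccount.queue.core.windows.net/myqueue"))
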
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,53 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-from ..version import VERSION
-
-
-class AzureQueueStorageConfiguration(Configuration):
-    """Configuration for AzureQueueStorage
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param url: The URL of the service account, queue or message that is the
-     target of the desired operation.
-    :type url: str
-    :ivar version: Specifies the version of the operation to use for this
-     request.
-    :vartype version: str
-    """
-
-    def __init__(self, url, **kwargs):
-
-        if url is None:
-            raise ValueError("Parameter 'url' must not be None.")
-
-        super(AzureQueueStorageConfiguration, self).__init__(**kwargs)
-        self._configure(**kwargs)
-
-        self.user_agent_policy.add_user_agent('azsdk-python-azurequeuestorage/{}'.format(VERSION))
-        self.generate_client_request_id = True
-        self.accept_language = None
-
-        self.url = url
-        self.version = "2018-03-28"
-
-    def _configure(self, **kwargs):
-        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
-        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
-        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
-        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
-        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
-        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
-        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
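
The only functional difference from the sync configuration is that the retry and redirect defaults are the async policy variants, which is what an AsyncPipelineClient expects; mixing a sync RetryPolicy into an async pipeline is the classic mismatch here. A quick check under the same placeholder assumptions::

    # Sketch only: confirm the async defaults are the Async* policy variants.
    from azure.core.pipeline import policies
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio._configuration_async import (
        AzureQueueStorageConfiguration,
    )

    config = AzureQueueStorageConfiguration("https://myaccount.queue.core.windows.net")
    assert isinstance(config.retry_policy, policies.AsyncRetryPolicy)
    assert isinstance(config.redirect_policy, policies.AsyncRedirectPolicy)
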
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations_async import ServiceOperations
-from ._queue_operations_async import QueueOperations
-from ._messages_operations_async import MessagesOperations
-from ._message_id_operations_async import MessageIdOperations
-
-__all__ = [
-    'ServiceOperations',
-    'QueueOperations',
-    'MessagesOperations',
-    'MessageIdOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,184 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class MessageIdOperations:
-    """MessageIdOperations async operations.
-
-    You should not instantiate this class directly; instead, create a client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def update(self, pop_receipt, visibilitytimeout, queue_message=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Update operation was introduced with version 2011-08-18 of the
-        Queue service API. The Update Message operation updates the visibility
-        timeout of a message. You can also use this operation to update the
-        contents of a message. A message must be in a format that can be
-        included in an XML request with UTF-8 encoding, and the encoded message
-        can be up to 64 KB in size.
-
-        :param pop_receipt: Required. Specifies the valid pop receipt value
-         returned from an earlier call to the Get Messages or Update Message
-         operation.
-        :type pop_receipt: str
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param queue_message: A Message object which can be stored in a Queue
-        :type queue_message: ~azure.storage.queue.models.QueueMessage
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.update.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str')
-        query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        if queue_message is not None:
-            body_content = self._serialize.body(queue_message, 'QueueMessage')
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-popreceipt': self._deserialize('str', response.headers.get('x-ms-popreceipt')),
-                'x-ms-time-next-visible': self._deserialize('rfc-1123', response.headers.get('x-ms-time-next-visible')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update.metadata = {'url': '/{queueName}/messages/{messageid}'}
-
-    async def delete(self, pop_receipt, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Delete operation deletes the specified message.
-
-        :param pop_receipt: Required. Specifies the valid pop receipt value
-         returned from an earlier call to the Get Messages or Update Message
-         operation.
-        :type pop_receipt: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{queueName}/messages/{messageid}'}
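
Both operations above are keyed on the pop receipt returned by an earlier Get Messages call, and the client's url must already address the specific message (the /{queueName}/messages/{messageid} template). A hedged sketch of an update-then-delete round trip; note that a successful Update invalidates the old receipt and hands back a fresh one in the x-ms-popreceipt header, reachable through the cls callback::

    # Sketch only: extend a message's invisibility, then delete it.
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def touch_and_remove(message_url, pop_receipt):
        async with AzureQueueStorage(message_url) as client:
            headers = {}
            # cls receives (response, deserialized, response_headers); capture
            # the headers to obtain the new pop receipt issued by Update.
            await client.message_id.update(
                pop_receipt, 60,
                cls=lambda resp, body, hdrs: headers.update(hdrs),
            )
            await client.message_id.delete(headers["x-ms-popreceipt"])
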
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_messages_operations_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_messages_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_messages_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_messages_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,350 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class MessagesOperations:
-    """MessagesOperations async operations.
-
-    You should not instantiate this class directly; instead, create a client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar peekonly: Constant value: "true".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.peekonly = "true"
-
-    async def dequeue(self, number_of_messages=None, visibilitytimeout=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Dequeue operation retrieves one or more messages from the front of
-        the queue.
-
-        :param number_of_messages: Optional. A nonzero integer value that
-         specifies the number of messages to retrieve from the queue, up to a
-         maximum of 32. If fewer are visible, the visible messages are
-         returned. By default, a single message is retrieved from the queue
-         with this operation.
-        :type number_of_messages: int
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.DequeuedMessageItem]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.dequeue.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if number_of_messages is not None:
-            query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1)
-        if visibilitytimeout is not None:
-            query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[DequeuedMessageItem]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    dequeue.metadata = {'url': '/{queueName}/messages'}
-
-    async def clear(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Clear operation deletes all messages from the specified queue.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.clear.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    clear.metadata = {'url': '/{queueName}/messages'}
-
-    async def enqueue(self, queue_message=None, visibilitytimeout=None, message_time_to_live=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Enqueue operation adds a new message to the back of the message
-        queue. A visibility timeout can also be specified to make the message
-        invisible until the visibility timeout expires. A message must be in a
-        format that can be included in an XML request with UTF-8 encoding. The
-        encoded message can be up to 64 KB in size for versions 2011-08-18 and
-        newer, or 8 KB in size for previous versions.
-
-        :param queue_message: A Message object which can be stored in a Queue
-        :type queue_message: ~azure.storage.queue.models.QueueMessage
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param message_time_to_live: Optional. Specifies the time-to-live
-         interval for the message, in seconds. Prior to version 2017-07-29, the
-         maximum time-to-live allowed is 7 days. For version 2017-07-29 or
-         later, the maximum time-to-live can be any positive number, as well as
-         -1 indicating that the message does not expire. If this parameter is
-         omitted, the default time-to-live is 7 days.
-        :type message_time_to_live: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.EnqueuedMessage]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.enqueue.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if visibilitytimeout is not None:
-            query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if message_time_to_live is not None:
-            query_parameters['messagettl'] = self._serialize.query("message_time_to_live", message_time_to_live, 'int', minimum=-1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        if queue_message is not None:
-            body_content = self._serialize.body(queue_message, 'QueueMessage')
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 201:
-            deserialized = self._deserialize('[EnqueuedMessage]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    enqueue.metadata = {'url': '/{queueName}/messages'}
-
-    async def peek(self, number_of_messages=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The Peek operation retrieves one or more messages from the front of the
-        queue, but does not alter the visibility of the message.
-
-        :param number_of_messages: Optional. A nonzero integer value that
-         specifies the number of messages to retrieve from the queue, up to a
-         maximum of 32. If fewer are visible, the visible messages are
-         returned. By default, a single message is retrieved from the queue
-         with this operation.
-        :type number_of_messages: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.PeekedMessageItem]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.peek.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if number_of_messages is not None:
-            query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['peekonly'] = self._serialize.query("self.peekonly", self.peekonly, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[PeekedMessageItem]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    peek.metadata = {'url': '/{queueName}/messages'}
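
The four operations compose into the usual produce/consume loop: Enqueue posts an XML-serializable QueueMessage, Dequeue pops up to 32 visible messages and hides them for the visibility timeout, Peek reads without hiding (via the constant peekonly=true), and Clear wipes the queue. A sketch of the enqueue/dequeue pair (placeholder endpoint, no credentials)::

    # Sketch only: round-trip one message through the removed generated API.
    from azure.multiapi.storagev2.queue.v2019_07_07._generated import models
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def round_trip(queue_url):
        async with AzureQueueStorage(queue_url) as client:
            # Message text must survive XML/UTF-8 encoding (<= 64 KB encoded).
            msg = models.QueueMessage(message_text="hello")
            await client.messages.enqueue(queue_message=msg)

            # Items stay invisible for ~30 s (default) unless deleted/updated.
            items = await client.messages.dequeue(number_of_messages=1)
            return items[0].message_text if items else None
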
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_queue_operations_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_queue_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_queue_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_queue_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,432 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class QueueOperations:
-    """QueueOperations async operations.
-
-    You should not instantiate this class directly; instead, create a client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    async def create(self, timeout=None, metadata=None, request_id=None, *, cls=None, **kwargs):
-        """creates a new queue under the given account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. A name-value pair to associate with the
-         queue as metadata. Note that metadata must be stored in
-         accordance with the naming restrictions imposed by the 2009-09-19
-         version of the Queue service. Beginning with this version, all
-         metadata names must adhere to the naming conventions for C#
-         identifiers.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201, 204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{queueName}'}
-
-    async def delete(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """operation permanently deletes the specified queue.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{queueName}'}
-
-    async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-value pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-approximate-messages-count': self._deserialize('int', response.headers.get('x-ms-approximate-messages-count')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{queueName}'}
-
-    async def set_metadata(self, timeout=None, metadata=None, request_id=None, *, cls=None, **kwargs):
-        """sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. A name-value pair to associate with the
-         queue as metadata. Note that metadata must be stored in
-         accordance with the naming restrictions imposed by the 2009-09-19
-         version of the Queue service. Beginning with this version, all
-         metadata names must adhere to the naming conventions for C#
-         identifiers.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{queueName}'}
-
-    async def get_access_policy(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """returns details about any stored access policies specified on the queue
-        that may be used with Shared Access Signatures.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{queueName}'}
-
-    async def set_access_policy(self, queue_acl=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """sets stored access policies for the queue that may be used with Shared
-        Access Signatures.
-
-        :param queue_acl: the ACLs for the queue
-        :type queue_acl: list[~azure.storage.queue.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations</a>.
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}}
-        if queue_acl is not None:
-            body_content = self._serialize.body(queue_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{queueName}'}
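
The QueueOperations coroutines removed above (set_metadata, get_access_policy, set_access_policy) were internal plumbing behind the public async QueueClient. For orientation, a minimal sketch of the same access-policy round trip against the standalone azure-storage-queue package (v12+), which supersedes this multiapi copy; the connection string and queue name are placeholders:

    # Sketch only: assumes the standalone azure-storage-queue package (v12+);
    # the connection string and queue name below are placeholders.
    import asyncio
    from datetime import datetime, timedelta, timezone

    from azure.storage.queue import AccessPolicy, QueueSasPermissions
    from azure.storage.queue.aio import QueueClient

    async def main():
        async with QueueClient.from_connection_string("<connection-string>", "myqueue") as queue:
            # Equivalent of the removed set_access_policy / get_access_policy pair.
            policy = AccessPolicy(
                permission=QueueSasPermissions(read=True),
                expiry=datetime.now(timezone.utc) + timedelta(hours=1),
            )
            await queue.set_queue_access_policy(signed_identifiers={"readonly": policy})
            print(await queue.get_queue_access_policy())

    asyncio.run(main())
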
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,349 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class ServiceOperations:
-    """ServiceOperations async operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "service".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "service"
-
-    async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Sets properties for a storage account's Queue service endpoint,
-        including properties for Storage Analytics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.queue.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations</a>.
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
-
-    async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """gets the properties of a storage account's Queue service, including
-        properties for Storage Analytics and CORS (Cross-Origin Resource
-        Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations</a>.
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
-
-    async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """Retrieves statistics related to replication for the Queue service. It
-        is only available on the secondary location endpoint when read-access
-        geo-redundant replication is enabled for the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations</a>.
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceStats or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.StorageServiceStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceStats', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/'}
-
-    async def list_queues_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
-        """The List Queues Segment operation returns a list of the queues under
-        the specified account.
-
-        :param prefix: Filters the results to return only queues whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of queues to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all queues remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of queues to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5000, the server will return up to 5000 items. Note that
-         if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify that the queues'
-         metadata be returned as part of the response body.
-        :type include: list[str or
-         ~azure.storage.queue.models.ListQueuesIncludeType]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations</a>.
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListQueuesSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.ListQueuesSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_queues_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListQueuesIncludeType]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListQueuesSegmentResponse', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_queues_segment.metadata = {'url': '/'}
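
The marker/NextMarker paging spelled out in the list_queues_segment docstring above is what the public client wraps into an async iterator. A minimal sketch, again assuming the standalone azure-storage-queue package (v12+) and a placeholder connection string:

    # Sketch only: list_queues() drives the removed list_queues_segment
    # operation's marker-based paging behind an async iterator.
    import asyncio

    from azure.storage.queue.aio import QueueServiceClient

    async def main():
        async with QueueServiceClient.from_connection_string("<connection-string>") as service:
            async for queue in service.list_queues(name_starts_with="jobs-",
                                                   include_metadata=True):
                print(queue.name, queue.metadata)

    asyncio.run(main())
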
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,72 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-try:
-    from ._models_py3 import AccessPolicy
-    from ._models_py3 import CorsRule
-    from ._models_py3 import DequeuedMessageItem
-    from ._models_py3 import EnqueuedMessage
-    from ._models_py3 import GeoReplication
-    from ._models_py3 import ListQueuesSegmentResponse
-    from ._models_py3 import Logging
-    from ._models_py3 import Metrics
-    from ._models_py3 import PeekedMessageItem
-    from ._models_py3 import QueueItem
-    from ._models_py3 import QueueMessage
-    from ._models_py3 import RetentionPolicy
-    from ._models_py3 import SignedIdentifier
-    from ._models_py3 import StorageError, StorageErrorException
-    from ._models_py3 import StorageServiceProperties
-    from ._models_py3 import StorageServiceStats
-except (SyntaxError, ImportError):
-    from ._models import AccessPolicy
-    from ._models import CorsRule
-    from ._models import DequeuedMessageItem
-    from ._models import EnqueuedMessage
-    from ._models import GeoReplication
-    from ._models import ListQueuesSegmentResponse
-    from ._models import Logging
-    from ._models import Metrics
-    from ._models import PeekedMessageItem
-    from ._models import QueueItem
-    from ._models import QueueMessage
-    from ._models import RetentionPolicy
-    from ._models import SignedIdentifier
-    from ._models import StorageError, StorageErrorException
-    from ._models import StorageServiceProperties
-    from ._models import StorageServiceStats
-from ._azure_queue_storage_enums import (
-    GeoReplicationStatusType,
-    ListQueuesIncludeType,
-    StorageErrorCode,
-)
-
-__all__ = [
-    'AccessPolicy',
-    'CorsRule',
-    'DequeuedMessageItem',
-    'EnqueuedMessage',
-    'GeoReplication',
-    'ListQueuesSegmentResponse',
-    'Logging',
-    'Metrics',
-    'PeekedMessageItem',
-    'QueueItem',
-    'QueueMessage',
-    'RetentionPolicy',
-    'SignedIdentifier',
-    'StorageError', 'StorageErrorException',
-    'StorageServiceProperties',
-    'StorageServiceStats',
-    'StorageErrorCode',
-    'GeoReplicationStatusType',
-    'ListQueuesIncludeType',
-]
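
The try/except import block deleted above implements the usual AutoRest py2/py3 split: prefer the _models_py3 variants, which use Python-3-only keyword-only parameters, and fall back to the **kwargs-based _models when importing them raises SyntaxError (py2) or ImportError. A self-contained sketch of the two styles (class names hypothetical):

    # Sketch of the two model styles behind the removed py2/py3 fallback.
    class AccessPolicyPy3:
        # Python-3-only: keyword-only, annotated parameters (_models_py3 style).
        def __init__(self, *, start: str, expiry: str, permission: str) -> None:
            self.start = start
            self.expiry = expiry
            self.permission = permission

    class AccessPolicyPy2:
        # Python-2-compatible: everything through **kwargs (_models style).
        def __init__(self, **kwargs):
            self.start = kwargs.get('start', None)
            self.expiry = kwargs.get('expiry', None)
            self.permission = kwargs.get('permission', None)
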
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,85 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-class StorageErrorCode(str, Enum):
-
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-    invalid_marker = "InvalidMarker"
-    message_not_found = "MessageNotFound"
-    message_too_large = "MessageTooLarge"
-    pop_receipt_mismatch = "PopReceiptMismatch"
-    queue_already_exists = "QueueAlreadyExists"
-    queue_being_deleted = "QueueBeingDeleted"
-    queue_disabled = "QueueDisabled"
-    queue_not_empty = "QueueNotEmpty"
-    queue_not_found = "QueueNotFound"
-    authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch"
-    authorization_protocol_mismatch = "AuthorizationProtocolMismatch"
-    authorization_permission_mismatch = "AuthorizationPermissionMismatch"
-    authorization_service_mismatch = "AuthorizationServiceMismatch"
-    authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-
-
-class GeoReplicationStatusType(str, Enum):
-
-    live = "live"
-    bootstrap = "bootstrap"
-    unavailable = "unavailable"
-
-
-class ListQueuesIncludeType(str, Enum):
-
-    metadata = "metadata"
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,631 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. the date-time the policy is active
-    :type start: str
-    :param expiry: Required. the date-time the policy expires
-    :type expiry: str
-    :param permission: Required. the permissions for the acl policy
-    :type permission: str
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'expiry': {'required': True},
-        'permission': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = kwargs.get('start', None)
-        self.expiry = kwargs.get('expiry', None)
-        self.permission = kwargs.get('permission', None)
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user age sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request (comma-separated).
-    :type allowed_methods: str
-    :param allowed_headers: Required. the request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = kwargs.get('allowed_origins', None)
-        self.allowed_methods = kwargs.get('allowed_methods', None)
-        self.allowed_headers = kwargs.get('allowed_headers', None)
-        self.exposed_headers = kwargs.get('exposed_headers', None)
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
-
-
-class DequeuedMessageItem(Model):
-    """The object returned in the QueueMessageList array when calling Get Messages
-    on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param pop_receipt: Required. This value is required to delete the
-     Message. If deletion fails using this popreceipt then the message has been
-     dequeued by another client.
-    :type pop_receipt: str
-    :param time_next_visible: Required. The time that the message will again
-     become visible in the Queue.
-    :type time_next_visible: datetime
-    :param dequeue_count: Required. The number of times the message has been
-     dequeued.
-    :type dequeue_count: long
-    :param message_text: Required. The content of the Message.
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'pop_receipt': {'required': True},
-        'time_next_visible': {'required': True},
-        'dequeue_count': {'required': True},
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}},
-        'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}},
-        'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}},
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, **kwargs):
-        super(DequeuedMessageItem, self).__init__(**kwargs)
-        self.message_id = kwargs.get('message_id', None)
-        self.insertion_time = kwargs.get('insertion_time', None)
-        self.expiration_time = kwargs.get('expiration_time', None)
-        self.pop_receipt = kwargs.get('pop_receipt', None)
-        self.time_next_visible = kwargs.get('time_next_visible', None)
-        self.dequeue_count = kwargs.get('dequeue_count', None)
-        self.message_text = kwargs.get('message_text', None)
-
-
-class EnqueuedMessage(Model):
-    """The object returned in the QueueMessageList array when calling Put Message
-    on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param pop_receipt: Required. This value is required to delete the
-     Message. If deletion fails using this popreceipt then the message has been
-     dequeued by another client.
-    :type pop_receipt: str
-    :param time_next_visible: Required. The time that the message will again
-     become visible in the Queue.
-    :type time_next_visible: datetime
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'pop_receipt': {'required': True},
-        'time_next_visible': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}},
-        'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, **kwargs):
-        super(EnqueuedMessage, self).__init__(**kwargs)
-        self.message_id = kwargs.get('message_id', None)
-        self.insertion_time = kwargs.get('insertion_time', None)
-        self.expiration_time = kwargs.get('expiration_time', None)
-        self.pop_receipt = kwargs.get('pop_receipt', None)
-        self.time_next_visible = kwargs.get('time_next_visible', None)
-
-
-class GeoReplication(Model):
-    """GeoReplication.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param status: Required. The status of the secondary location. Possible
-     values include: 'live', 'bootstrap', 'unavailable'
-    :type status: str or ~azure.storage.queue.models.GeoReplicationStatusType
-    :param last_sync_time: Required. A GMT date/time value, to the second. All
-     primary writes preceding this value are guaranteed to be available for
-     read operations at the secondary. Primary writes after this point in time
-     may or may not be available for reads.
-    :type last_sync_time: datetime
-    """
-
-    _validation = {
-        'status': {'required': True},
-        'last_sync_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
-        'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(GeoReplication, self).__init__(**kwargs)
-        self.status = kwargs.get('status', None)
-        self.last_sync_time = kwargs.get('last_sync_time', None)
-
-
-class ListQueuesSegmentResponse(Model):
-    """The object returned when calling List Queues on a Queue Service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix: Required.
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results: Required.
-    :type max_results: int
-    :param queue_items:
-    :type queue_items: list[~azure.storage.queue.models.QueueItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'prefix': {'required': True},
-        'max_results': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'itemsName': 'Queues', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, **kwargs):
-        super(ListQueuesSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = kwargs.get('service_endpoint', None)
-        self.prefix = kwargs.get('prefix', None)
-        self.marker = kwargs.get('marker', None)
-        self.max_results = kwargs.get('max_results', None)
-        self.queue_items = kwargs.get('queue_items', None)
-        self.next_marker = kwargs.get('next_marker', None)
-
-
-class Logging(Model):
-    """Azure Analytics Logging settings.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param delete: Required. Indicates whether all delete requests should be
-     logged.
-    :type delete: bool
-    :param read: Required. Indicates whether all read requests should be
-     logged.
-    :type read: bool
-    :param write: Required. Indicates whether all write requests should be
-     logged.
-    :type write: bool
-    :param retention_policy: Required.
-    :type retention_policy: ~azure.storage.queue.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'delete': {'required': True},
-        'read': {'required': True},
-        'write': {'required': True},
-        'retention_policy': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
-        'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
-        'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Logging, self).__init__(**kwargs)
-        self.version = kwargs.get('version', None)
-        self.delete = kwargs.get('delete', None)
-        self.read = kwargs.get('read', None)
-        self.write = kwargs.get('write', None)
-        self.retention_policy = kwargs.get('retention_policy', None)
-
-
-class Metrics(Model):
-    """Metrics.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     Queue service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.queue.models.RetentionPolicy
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(Metrics, self).__init__(**kwargs)
-        self.version = kwargs.get('version', None)
-        self.enabled = kwargs.get('enabled', None)
-        self.include_apis = kwargs.get('include_apis', None)
-        self.retention_policy = kwargs.get('retention_policy', None)
-
-
-class PeekedMessageItem(Model):
-    """The object returned in the QueueMessageList array when calling Peek
-    Messages on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param dequeue_count: Required. The number of times the message has been
-     dequeued.
-    :type dequeue_count: long
-    :param message_text: Required. The content of the Message.
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'dequeue_count': {'required': True},
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}},
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, **kwargs):
-        super(PeekedMessageItem, self).__init__(**kwargs)
-        self.message_id = kwargs.get('message_id', None)
-        self.insertion_time = kwargs.get('insertion_time', None)
-        self.expiration_time = kwargs.get('expiration_time', None)
-        self.dequeue_count = kwargs.get('dequeue_count', None)
-        self.message_text = kwargs.get('message_text', None)
-
-
-class QueueItem(Model):
-    """An Azure Storage Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required. The name of the Queue.
-    :type name: str
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Queue'
-    }
-
-    def __init__(self, **kwargs):
-        super(QueueItem, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.metadata = kwargs.get('metadata', None)
-
-
-class QueueMessage(Model):
-    """A Message object which can be stored in a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_text: Required. The content of the message
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(QueueMessage, self).__init__(**kwargs)
-        self.message_text = kwargs.get('message_text', None)
-
-
-class RetentionPolicy(Model):
-    """the retention policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the storage service
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics or logging or
-     soft-deleted data should be retained. All data older than this value will
-     be deleted
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = kwargs.get('enabled', None)
-        self.days = kwargs.get('days', None)
-
-
-class SignedIdentifier(Model):
-    """signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. a unique id
-    :type id: str
-    :param access_policy: The access policy
-    :type access_policy: ~azure.storage.queue.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = kwargs.get('id', None)
-        self.access_policy = kwargs.get('access_policy', None)
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageError, self).__init__(**kwargs)
-        self.message = kwargs.get('message', None)
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage Service Properties.
-
-    :param logging: Azure Analytics Logging settings
-    :type logging: ~azure.storage.queue.models.Logging
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for queues
-    :type hour_metrics: ~azure.storage.queue.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for queues
-    :type minute_metrics: ~azure.storage.queue.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.queue.models.CorsRule]
-    """
-
-    _attribute_map = {
-        'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.logging = kwargs.get('logging', None)
-        self.hour_metrics = kwargs.get('hour_metrics', None)
-        self.minute_metrics = kwargs.get('minute_metrics', None)
-        self.cors = kwargs.get('cors', None)
-
-
-class StorageServiceStats(Model):
-    """Stats for the storage service.
-
-    :param geo_replication: Geo-Replication information for the Secondary
-     Storage Service
-    :type geo_replication: ~azure.storage.queue.models.GeoReplication
-    """
-
-    _attribute_map = {
-        'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceStats, self).__init__(**kwargs)
-        self.geo_replication = kwargs.get('geo_replication', None)
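For reference, the removed v2019_07_07 models were plain msrest Model subclasses, so configuring queue analytics looked roughly like the following sketch (the import path assumes the 1.4.0 package layout and is illustrative only, not a supported entry point):

    from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import (
        Logging, Metrics, RetentionPolicy, StorageServiceProperties,
    )

    # Retain analytics data for seven days. Both model flavors accept keyword
    # arguments, so the same call works against _models.py (kwargs-based) and
    # _models_py3.py (keyword-only parameters).
    retention = RetentionPolicy(enabled=True, days=7)
    props = StorageServiceProperties(
        logging=Logging(version='1.0', delete=True, read=True, write=True,
                        retention_policy=retention),
        hour_metrics=Metrics(enabled=False),
        minute_metrics=Metrics(enabled=False),
    )
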
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,631 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-from azure.core.exceptions import HttpResponseError
-
-
-class AccessPolicy(Model):
-    """An Access policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param start: Required. The date-time the policy is active.
-    :type start: str
-    :param expiry: Required. The date-time the policy expires.
-    :type expiry: str
-    :param permission: Required. The permissions for the ACL policy.
-    :type permission: str
-    """
-
-    _validation = {
-        'start': {'required': True},
-        'expiry': {'required': True},
-        'permission': {'required': True},
-    }
-
-    _attribute_map = {
-        'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
-        'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
-        'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, start: str, expiry: str, permission: str, **kwargs) -> None:
-        super(AccessPolicy, self).__init__(**kwargs)
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = allowed_origins
-        self.allowed_methods = allowed_methods
-        self.allowed_headers = allowed_headers
-        self.exposed_headers = exposed_headers
-        self.max_age_in_seconds = max_age_in_seconds
-
-
-class DequeuedMessageItem(Model):
-    """The object returned in the QueueMessageList array when calling Get Messages
-    on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param pop_receipt: Required. This value is required to delete the
-     Message. If deletion fails using this popreceipt then the message has been
-     dequeued by another client.
-    :type pop_receipt: str
-    :param time_next_visible: Required. The time that the message will again
-     become visible in the Queue.
-    :type time_next_visible: datetime
-    :param dequeue_count: Required. The number of times the message has been
-     dequeued.
-    :type dequeue_count: long
-    :param message_text: Required. The content of the Message.
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'pop_receipt': {'required': True},
-        'time_next_visible': {'required': True},
-        'dequeue_count': {'required': True},
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}},
-        'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}},
-        'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}},
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, dequeue_count: int, message_text: str, **kwargs) -> None:
-        super(DequeuedMessageItem, self).__init__(**kwargs)
-        self.message_id = message_id
-        self.insertion_time = insertion_time
-        self.expiration_time = expiration_time
-        self.pop_receipt = pop_receipt
-        self.time_next_visible = time_next_visible
-        self.dequeue_count = dequeue_count
-        self.message_text = message_text
-
-
-class EnqueuedMessage(Model):
-    """The object returned in the QueueMessageList array when calling Put Message
-    on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param pop_receipt: Required. This value is required to delete the
-     Message. If deletion fails using this popreceipt then the message has been
-     dequeued by another client.
-    :type pop_receipt: str
-    :param time_next_visible: Required. The time that the message will again
-     become visible in the Queue.
-    :type time_next_visible: datetime
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'pop_receipt': {'required': True},
-        'time_next_visible': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}},
-        'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, **kwargs) -> None:
-        super(EnqueuedMessage, self).__init__(**kwargs)
-        self.message_id = message_id
-        self.insertion_time = insertion_time
-        self.expiration_time = expiration_time
-        self.pop_receipt = pop_receipt
-        self.time_next_visible = time_next_visible
-
-
-class GeoReplication(Model):
-    """GeoReplication.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param status: Required. The status of the secondary location. Possible
-     values include: 'live', 'bootstrap', 'unavailable'
-    :type status: str or ~azure.storage.queue.models.GeoReplicationStatusType
-    :param last_sync_time: Required. A GMT date/time value, to the second. All
-     primary writes preceding this value are guaranteed to be available for
-     read operations at the secondary. Primary writes after this point in time
-     may or may not be available for reads.
-    :type last_sync_time: datetime
-    """
-
-    _validation = {
-        'status': {'required': True},
-        'last_sync_time': {'required': True},
-    }
-
-    _attribute_map = {
-        'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
-        'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, status, last_sync_time, **kwargs) -> None:
-        super(GeoReplication, self).__init__(**kwargs)
-        self.status = status
-        self.last_sync_time = last_sync_time
-
-
-class ListQueuesSegmentResponse(Model):
-    """The object returned when calling List Queues on a Queue Service.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param service_endpoint: Required.
-    :type service_endpoint: str
-    :param prefix: Required.
-    :type prefix: str
-    :param marker:
-    :type marker: str
-    :param max_results: Required.
-    :type max_results: int
-    :param queue_items:
-    :type queue_items: list[~azure.storage.queue.models.QueueItem]
-    :param next_marker: Required.
-    :type next_marker: str
-    """
-
-    _validation = {
-        'service_endpoint': {'required': True},
-        'prefix': {'required': True},
-        'max_results': {'required': True},
-        'next_marker': {'required': True},
-    }
-
-    _attribute_map = {
-        'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
-        'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
-        'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
-        'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
-        'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'itemsName': 'Queues', 'wrapped': True}},
-        'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
-    }
-    _xml_map = {
-        'name': 'EnumerationResults'
-    }
-
-    def __init__(self, *, service_endpoint: str, prefix: str, max_results: int, next_marker: str, marker: str=None, queue_items=None, **kwargs) -> None:
-        super(ListQueuesSegmentResponse, self).__init__(**kwargs)
-        self.service_endpoint = service_endpoint
-        self.prefix = prefix
-        self.marker = marker
-        self.max_results = max_results
-        self.queue_items = queue_items
-        self.next_marker = next_marker
-
-
-class Logging(Model):
-    """Azure Analytics Logging settings.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: Required. The version of Storage Analytics to configure.
-    :type version: str
-    :param delete: Required. Indicates whether all delete requests should be
-     logged.
-    :type delete: bool
-    :param read: Required. Indicates whether all read requests should be
-     logged.
-    :type read: bool
-    :param write: Required. Indicates whether all write requests should be
-     logged.
-    :type write: bool
-    :param retention_policy: Required.
-    :type retention_policy: ~azure.storage.queue.models.RetentionPolicy
-    """
-
-    _validation = {
-        'version': {'required': True},
-        'delete': {'required': True},
-        'read': {'required': True},
-        'write': {'required': True},
-        'retention_policy': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
-        'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
-        'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None:
-        super(Logging, self).__init__(**kwargs)
-        self.version = version
-        self.delete = delete
-        self.read = read
-        self.write = write
-        self.retention_policy = retention_policy
-
-
-class Metrics(Model):
-    """Metrics.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param version: The version of Storage Analytics to configure.
-    :type version: str
-    :param enabled: Required. Indicates whether metrics are enabled for the
-     Queue service.
-    :type enabled: bool
-    :param include_apis: Indicates whether metrics should generate summary
-     statistics for called API operations.
-    :type include_apis: bool
-    :param retention_policy:
-    :type retention_policy: ~azure.storage.queue.models.RetentionPolicy
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-    }
-
-    _attribute_map = {
-        'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
-        'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
-        super(Metrics, self).__init__(**kwargs)
-        self.version = version
-        self.enabled = enabled
-        self.include_apis = include_apis
-        self.retention_policy = retention_policy
-
-
-class PeekedMessageItem(Model):
-    """The object returned in the QueueMessageList array when calling Peek
-    Messages on a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_id: Required. The Id of the Message.
-    :type message_id: str
-    :param insertion_time: Required. The time the Message was inserted into
-     the Queue.
-    :type insertion_time: datetime
-    :param expiration_time: Required. The time that the Message will expire
-     and be automatically deleted.
-    :type expiration_time: datetime
-    :param dequeue_count: Required. The number of times the message has been
-     dequeued.
-    :type dequeue_count: long
-    :param message_text: Required. The content of the Message.
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_id': {'required': True},
-        'insertion_time': {'required': True},
-        'expiration_time': {'required': True},
-        'dequeue_count': {'required': True},
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}},
-        'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}},
-        'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}},
-        'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}},
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-        'name': 'QueueMessage'
-    }
-
-    def __init__(self, *, message_id: str, insertion_time, expiration_time, dequeue_count: int, message_text: str, **kwargs) -> None:
-        super(PeekedMessageItem, self).__init__(**kwargs)
-        self.message_id = message_id
-        self.insertion_time = insertion_time
-        self.expiration_time = expiration_time
-        self.dequeue_count = dequeue_count
-        self.message_text = message_text
-
-
-class QueueItem(Model):
-    """An Azure Storage Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required. The name of the Queue.
-    :type name: str
-    :param metadata:
-    :type metadata: dict[str, str]
-    """
-
-    _validation = {
-        'name': {'required': True},
-    }
-
-    _attribute_map = {
-        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
-        'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
-    }
-    _xml_map = {
-        'name': 'Queue'
-    }
-
-    def __init__(self, *, name: str, metadata=None, **kwargs) -> None:
-        super(QueueItem, self).__init__(**kwargs)
-        self.name = name
-        self.metadata = metadata
-
-
-class QueueMessage(Model):
-    """A Message object which can be stored in a Queue.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param message_text: Required. The content of the message
-    :type message_text: str
-    """
-
-    _validation = {
-        'message_text': {'required': True},
-    }
-
-    _attribute_map = {
-        'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, message_text: str, **kwargs) -> None:
-        super(QueueMessage, self).__init__(**kwargs)
-        self.message_text = message_text
-
-
-class RetentionPolicy(Model):
-    """the retention policy.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param enabled: Required. Indicates whether a retention policy is enabled
-     for the storage service
-    :type enabled: bool
-    :param days: Indicates the number of days that metrics or logging or
-     soft-deleted data should be retained. All data older than this value will
-     be deleted
-    :type days: int
-    """
-
-    _validation = {
-        'enabled': {'required': True},
-        'days': {'minimum': 1},
-    }
-
-    _attribute_map = {
-        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
-        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
-        super(RetentionPolicy, self).__init__(**kwargs)
-        self.enabled = enabled
-        self.days = days
-
-
-class SignedIdentifier(Model):
-    """signed identifier.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param id: Required. A unique id.
-    :type id: str
-    :param access_policy: The access policy
-    :type access_policy: ~azure.storage.queue.models.AccessPolicy
-    """
-
-    _validation = {
-        'id': {'required': True},
-    }
-
-    _attribute_map = {
-        'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
-        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
-        super(SignedIdentifier, self).__init__(**kwargs)
-        self.id = id
-        self.access_policy = access_policy
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, message: str=None, **kwargs) -> None:
-        super(StorageError, self).__init__(**kwargs)
-        self.message = message
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responsed with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage Service Properties.
-
-    :param logging: Azure Analytics Logging settings
-    :type logging: ~azure.storage.queue.models.Logging
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for queues
-    :type hour_metrics: ~azure.storage.queue.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for queues
-    :type minute_metrics: ~azure.storage.queue.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.queue.models.CorsRule]
-    """
-
-    _attribute_map = {
-        'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None:
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.logging = logging
-        self.hour_metrics = hour_metrics
-        self.minute_metrics = minute_metrics
-        self.cors = cors
-
-
-class StorageServiceStats(Model):
-    """Stats for the storage service.
-
-    :param geo_replication: Geo-Replication information for the Secondary
-     Storage Service
-    :type geo_replication: ~azure.storage.queue.models.GeoReplication
-    """
-
-    _attribute_map = {
-        'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, geo_replication=None, **kwargs) -> None:
-        super(StorageServiceStats, self).__init__(**kwargs)
-        self.geo_replication = geo_replication
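Unlike the kwargs-based models in _models.py, these py3 variants enforce required fields with keyword-only parameters, so a missing field fails at construction time rather than at msrest serialization. A minimal sketch under the same layout assumption as above:

    from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import (
        CorsRule, QueueMessage,
    )

    msg = QueueMessage(message_text='hello, queue')
    cors = CorsRule(
        allowed_origins='https://contoso.example',
        allowed_methods='GET,PUT',
        allowed_headers='x-ms-meta-*',
        exposed_headers='x-ms-meta-*',
        max_age_in_seconds=3600,
    )
    # CorsRule(allowed_origins='*') alone would raise TypeError here, because
    # allowed_methods and the other required keywords are missing.
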
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,22 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from ._service_operations import ServiceOperations
-from ._queue_operations import QueueOperations
-from ._messages_operations import MessagesOperations
-from ._message_id_operations import MessageIdOperations
-
-__all__ = [
-    'ServiceOperations',
-    'QueueOperations',
-    'MessagesOperations',
-    'MessageIdOperations',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,184 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class MessageIdOperations(object):
-    """MessageIdOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def update(self, pop_receipt, visibilitytimeout, queue_message=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Update operation was introduced with version 2011-08-18 of the
-        Queue service API. The Update Message operation updates the visibility
-        timeout of a message. You can also use this operation to update the
-        contents of a message. A message must be in a format that can be
-        included in an XML request with UTF-8 encoding, and the encoded message
-        can be up to 64 KB in size.
-
-        :param pop_receipt: Required. Specifies the valid pop receipt value
-         returned from an earlier call to the Get Messages or Update Message
-         operation.
-        :type pop_receipt: str
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param queue_message: A Message object which can be stored in a Queue
-        :type queue_message: ~azure.storage.queue.models.QueueMessage
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.update.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str')
-        query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        if queue_message is not None:
-            body_content = self._serialize.body(queue_message, 'QueueMessage')
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-popreceipt': self._deserialize('str', response.headers.get('x-ms-popreceipt')),
-                'x-ms-time-next-visible': self._deserialize('rfc-1123', response.headers.get('x-ms-time-next-visible')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    update.metadata = {'url': '/{queueName}/messages/{messageid}'}
-
-    def delete(self, pop_receipt, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Delete operation deletes the specified message.
-
-        :param pop_receipt: Required. Specifies the valid pop receipt value
-         returned from an earlier call to the Get Messages or Update Message
-         operation.
-        :type pop_receipt: str
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{queueName}/messages/{messageid}'}
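These operation groups are not constructed directly; the generated client builds them and attaches them as attributes. A sketch of the update/delete flow (the AzureQueueStorage client name, and the expectation that the configured url already resolves to /{queueName}/messages/{messageid}, follow the AutoRest output this file shipped with; treat both as assumptions):

    from azure.multiapi.storagev2.queue.v2019_07_07._generated import AzureQueueStorage
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import QueueMessage

    # format_url() above only substitutes {url}, so the queue name and message
    # id must already be baked into the client's url.
    client = AzureQueueStorage(
        url='https://acct.queue.core.windows.net/myqueue/messages/<message-id>')
    client.message_id.update(
        pop_receipt='<pop-receipt>',
        visibilitytimeout=60,
        queue_message=QueueMessage(message_text='updated body'),
    )
    client.message_id.delete(pop_receipt='<pop-receipt>')
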
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_messages_operations.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_messages_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_messages_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_messages_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,350 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class MessagesOperations(object):
-    """MessagesOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar peekonly: Constant value: "true".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.peekonly = "true"
-
-    def dequeue(self, number_of_messages=None, visibilitytimeout=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Dequeue operation retrieves one or more messages from the front of
-        the queue.
-
-        :param number_of_messages: Optional. A nonzero integer value that
-         specifies the number of messages to retrieve from the queue, up to a
-         maximum of 32. If fewer are visible, the visible messages are
-         returned. By default, a single message is retrieved from the queue
-         with this operation.
-        :type number_of_messages: int
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.DequeuedMessageItem]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.dequeue.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if number_of_messages is not None:
-            query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1)
-        if visibilitytimeout is not None:
-            query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[DequeuedMessageItem]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    dequeue.metadata = {'url': '/{queueName}/messages'}
-
-    def clear(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Clear operation deletes all messages from the specified queue.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.clear.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    clear.metadata = {'url': '/{queueName}/messages'}
-
-    def enqueue(self, queue_message=None, visibilitytimeout=None, message_time_to_live=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Enqueue operation adds a new message to the back of the message
-        queue. A visibility timeout can also be specified to make the message
-        invisible until the visibility timeout expires. A message must be in a
-        format that can be included in an XML request with UTF-8 encoding. The
-        encoded message can be up to 64 KB in size for versions 2011-08-18 and
-        newer, or 8 KB in size for previous versions.
-
-        :param queue_message: A Message object which can be stored in a Queue
-        :type queue_message: ~azure.storage.queue.models.QueueMessage
-        :param visibilitytimeout: Optional. Specifies the new visibility
-         timeout value, in seconds, relative to server time. The default value
-         is 30 seconds. A specified value must be larger than or equal to 1
-         second, and cannot be larger than 7 days, or larger than 2 hours on
-         REST protocol versions prior to version 2011-08-18. The visibility
-         timeout of a message can be set to a value later than the expiry time.
-        :type visibilitytimeout: int
-        :param message_time_to_live: Optional. Specifies the time-to-live
-         interval for the message, in seconds. Prior to version 2017-07-29, the
-         maximum time-to-live allowed is 7 days. For version 2017-07-29 or
-         later, the maximum time-to-live can be any positive number, as well as
-         -1 indicating that the message does not expire. If this parameter is
-         omitted, the default time-to-live is 7 days.
-        :type message_time_to_live: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.EnqueuedMessage]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.enqueue.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if visibilitytimeout is not None:
-            query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0)
-        if message_time_to_live is not None:
-            query_parameters['messagettl'] = self._serialize.query("message_time_to_live", message_time_to_live, 'int', minimum=-1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        if queue_message is not None:
-            body_content = self._serialize.body(queue_message, 'QueueMessage')
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.post(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 201:
-            deserialized = self._deserialize('[EnqueuedMessage]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    enqueue.metadata = {'url': '/{queueName}/messages'}
-
-    def peek(self, number_of_messages=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The Peek operation retrieves one or more messages from the front of the
-        queue, but does not alter the visibility of the message.
-
-        :param number_of_messages: Optional. A nonzero integer value that
-         specifies the number of messages to retrieve from the queue, up to a
-         maximum of 32. If fewer are visible, the visible messages are
-         returned. By default, a single message is retrieved from the queue
-         with this operation.
-        :type number_of_messages: int
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.PeekedMessageItem]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.peek.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if number_of_messages is not None:
-            query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1)
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['peekonly'] = self._serialize.query("self.peekonly", self.peekonly, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[PeekedMessageItem]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    peek.metadata = {'url': '/{queueName}/messages'}
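A companion sketch for the message-level operations, reusing the imports and assumed AzureQueueStorage client from the previous sketch (here the url resolves to /{queueName}/messages):

    client = AzureQueueStorage(
        url='https://acct.queue.core.windows.net/myqueue/messages')
    client.messages.enqueue(
        queue_message=QueueMessage(message_text='hi'),
        message_time_to_live=3600,      # expire after one hour
    )
    peeked = client.messages.peek(number_of_messages=5)    # visibility unchanged
    items = client.messages.dequeue(number_of_messages=5,
                                    visibilitytimeout=30)  # hidden for 30 seconds
    # Each DequeuedMessageItem carries the pop_receipt that the
    # MessageIdOperations sketch above needs for update/delete.
    client.messages.clear()
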
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_queue_operations.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_queue_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_queue_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_queue_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,432 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class QueueOperations(object):
-    """QueueOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-
-    def create(self, timeout=None, metadata=None, request_id=None, cls=None, **kwargs):
-        """creates a new queue under the given account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. A name-value pair to associate with the
-         queue as metadata. Note that beginning with the 2009-09-19 version of
-         the Queue service, metadata names must adhere to the naming
-         conventions for C# identifiers.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.create.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [201, 204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    create.metadata = {'url': '/{queueName}'}
-
-    def delete(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """operation permanently deletes the specified queue.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    delete.metadata = {'url': '/{queueName}'}
-
-    def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """Retrieves user-defined metadata and queue properties on the specified
-        queue. Metadata is associated with the queue as name-values pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
-                'x-ms-approximate-messages-count': self._deserialize('int', response.headers.get('x-ms-approximate-messages-count')),
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    get_properties.metadata = {'url': '/{queueName}'}
-
-    def set_metadata(self, timeout=None, metadata=None, request_id=None, cls=None, **kwargs):
-        """sets user-defined metadata on the specified queue. Metadata is
-        associated with the queue as name-value pairs.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param metadata: Optional. A name-value pair to associate with the
-         queue as metadata. Note that beginning with the 2009-09-19 version of
-         the Queue service, metadata names must adhere to the naming
-         conventions for C# identifiers.
-        :type metadata: str
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "metadata"
-
-        # Construct URL
-        url = self.set_metadata.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if metadata is not None:
-            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_metadata.metadata = {'url': '/{queueName}'}
-
-    def get_access_policy(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """returns details about any stored access policies specified on the queue
-        that may be used with Shared Access Signatures.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: list or the result of cls(response)
-        :rtype: list[~azure.storage.queue.models.SignedIdentifier]
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.get_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('[SignedIdentifier]', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_access_policy.metadata = {'url': '/{queueName}'}
-
-    def set_access_policy(self, queue_acl=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """sets stored access policies for the queue that may be used with Shared
-        Access Signatures.
-
-        :param queue_acl: The ACLs for the queue.
-        :type queue_acl: list[~azure.storage.queue.models.SignedIdentifier]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "acl"
-
-        # Construct URL
-        url = self.set_access_policy.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}}
-        if queue_acl is not None:
-            body_content = self._serialize.body(queue_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
-        else:
-            body_content = None
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_access_policy.metadata = {'url': '/{queueName}'}
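
A hedged round trip over the QueueOperations group above, reusing the same assumed client as in the earlier sketch; the expected status codes in the comments mirror the checks in each operation body:

    client.queue.create(timeout=30)                     # PUT /{queueName}, expects 201/204
    client.queue.set_metadata(metadata="purpose=demo")  # PUT ?comp=metadata, expects 204
    headers = client.queue.get_properties(
        cls=lambda resp, body, hdrs: hdrs)              # GET ?comp=metadata, expects 200
    print(headers.get('x-ms-approximate-messages-count'))
    client.queue.delete()                               # DELETE /{queueName}, expects 204
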
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,349 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from .. import models
-
-
-class ServiceOperations(object):
-    """ServiceOperations operations.
-
-    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar restype: Constant value: "service".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-
-        self._config = config
-        self.restype = "service"
-
-    def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs):
-        """Sets properties for a storage account's Queue service endpoint,
-        including properties for Storage Analytics and CORS (Cross-Origin
-        Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties:
-         ~azure.storage.queue.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.set_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        if cls:
-            response_headers = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-            return cls(response, None, response_headers)
-    set_properties.metadata = {'url': '/'}
-
-    def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """gets the properties of a storage account's Queue service, including
-        properties for Storage Analytics and CORS (Cross-Origin Resource
-        Sharing) rules.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceProperties or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.StorageServiceProperties
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "properties"
-
-        # Construct URL
-        url = self.get_properties.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceProperties', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_properties.metadata = {'url': '/'}
-
-    def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs):
-        """Retrieves statistics related to replication for the Queue service. It
-        is only available on the secondary location endpoint when read-access
-        geo-redundant replication is enabled for the storage account.
-
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: StorageServiceStats or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.StorageServiceStats
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "stats"
-
-        # Construct URL
-        url = self.get_statistics.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('StorageServiceStats', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    get_statistics.metadata = {'url': '/'}
-
-    def list_queues_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
-        """The List Queues Segment operation returns a list of the queues under
-        the specified account.
-
-        :param prefix: Filters the results to return only queues whose name
-         begins with the specified prefix.
-        :type prefix: str
-        :param marker: A string value that identifies the portion of the list
-         of queues to be returned with the next listing operation. The
-         operation returns the NextMarker value within the response body if the
-         listing operation did not return all queues remaining to be listed
-         with the current page. The NextMarker value can be used as the value
-         for the marker parameter in a subsequent call to request the next page
-         of list items. The marker value is opaque to the client.
-        :type marker: str
-        :param maxresults: Specifies the maximum number of queues to return.
-         If the request does not specify maxresults, or specifies a value
-         greater than 5000, the server will return up to 5000 items. Note that
-         if the listing operation crosses a partition boundary, then the
-         service will return a continuation token for retrieving the remainder
-         of the results. For this reason, it is possible that the service will
-         return fewer results than specified by maxresults, or than the default
-         of 5000.
-        :type maxresults: int
-        :param include: Include this parameter to specify that the queues'
-         metadata be returned as part of the response body.
-        :type include: list[str or
-         ~azure.storage.queue.models.ListQueuesIncludeType]
-        :param timeout: The timeout parameter is expressed in seconds. For
-         more information, see <a
-         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
-         Timeouts for Queue Service Operations.</a>
-        :type timeout: int
-        :param request_id: Provides a client-generated, opaque value with a 1
-         KB character limit that is recorded in the analytics logs when storage
-         analytics logging is enabled.
-        :type request_id: str
-        :param callable cls: A custom type or function that will be passed the
-         direct response
-        :return: ListQueuesSegmentResponse or the result of cls(response)
-        :rtype: ~azure.storage.queue.models.ListQueuesSegmentResponse
-        :raises:
-         :class:`StorageErrorException<azure.storage.queue.models.StorageErrorException>`
-        """
-        error_map = kwargs.pop('error_map', None)
-        comp = "list"
-
-        # Construct URL
-        url = self.list_queues_segment.metadata['url']
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        if prefix is not None:
-            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
-        if marker is not None:
-            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
-        if maxresults is not None:
-            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-        if include is not None:
-            query_parameters['include'] = self._serialize.query("include", include, '[ListQueuesIncludeType]', div=',')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/xml'
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.StorageErrorException(response, self._deserialize)
-
-        header_dict = {}
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('ListQueuesSegmentResponse', response)
-            header_dict = {
-                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
-                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
-                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
-                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
-            }
-
-        if cls:
-            return cls(response, deserialized, header_dict)
-
-        return deserialized
-    list_queues_segment.metadata = {'url': '/'}
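
A manual paging sketch over list_queues_segment above; it assumes ListQueuesSegmentResponse exposes queue_items and next_marker, which the generated models file (not shown here) would have to confirm:

    marker = None
    while True:
        segment = client.service.list_queues_segment(
            prefix="jobs-", maxresults=100, marker=marker, include=["metadata"])
        for queue in segment.queue_items:
            print(queue.name, queue.metadata)
        marker = segment.next_marker
        if not marker:  # an empty NextMarker means the listing is complete
            break
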
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-VERSION = "2018-03-28"
-
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,150 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=unused-argument
-
-from base64 import b64encode, b64decode
-
-import sys
-import six
-from azure.core.exceptions import DecodeError
-
-from ._shared.encryption import decrypt_queue_message, encrypt_queue_message
-
-
-class MessageEncodePolicy(object):
-
-    def __init__(self):
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.resolver = None
-
-    def __call__(self, content):
-        if content:
-            content = self.encode(content)
-            if self.key_encryption_key is not None:
-                content = encrypt_queue_message(content, self.key_encryption_key)
-        return content
-
-    def configure(self, require_encryption, key_encryption_key, resolver):
-        self.require_encryption = require_encryption
-        self.key_encryption_key = key_encryption_key
-        self.resolver = resolver
-        if self.require_encryption and not self.key_encryption_key:
-            raise ValueError("Encryption required but no key was provided.")
-
-    def encode(self, content):
-        raise NotImplementedError("Must be implemented by child class.")
-
-
-class MessageDecodePolicy(object):
-
-    def __init__(self):
-        self.require_encryption = False
-        self.key_encryption_key = None
-        self.resolver = None
-
-    def __call__(self, response, obj, headers):
-        for message in obj:
-            if message.message_text in [None, "", b""]:
-                continue
-            content = message.message_text
-            if (self.key_encryption_key is not None) or (self.resolver is not None):
-                content = decrypt_queue_message(
-                    content, response,
-                    self.require_encryption,
-                    self.key_encryption_key,
-                    self.resolver)
-            message.message_text = self.decode(content, response)
-        return obj
-
-    def configure(self, require_encryption, key_encryption_key, resolver):
-        self.require_encryption = require_encryption
-        self.key_encryption_key = key_encryption_key
-        self.resolver = resolver
-
-    def decode(self, content, response):
-        raise NotImplementedError("Must be implemented by child class.")
-
-
-class TextBase64EncodePolicy(MessageEncodePolicy):
-    """Base 64 message encoding policy for text messages.
-
-    Encodes text (unicode) messages to base 64. If the input content
-    is not text, a TypeError will be raised. Input text must support UTF-8.
-    """
-
-    def encode(self, content):
-        if not isinstance(content, six.text_type):
-            raise TypeError("Message content must be text for base 64 encoding.")
-        return b64encode(content.encode('utf-8')).decode('utf-8')
-
-
-class TextBase64DecodePolicy(MessageDecodePolicy):
-    """Message decoding policy for base 64-encoded messages into text.
-
-    Decodes base64-encoded messages to text (unicode). If the input content
-    is not valid base 64, a DecodeError will be raised. Message data must
-    support UTF-8.
-    """
-
-    def decode(self, content, response):
-        try:
-            return b64decode(content.encode('utf-8')).decode('utf-8')
-        except (ValueError, TypeError) as error:
-            # ValueError for Python 3, TypeError for Python 2
-            raise DecodeError(
-                message="Message content is not valid base 64.",
-                response=response,
-                error=error)
-
-
-class BinaryBase64EncodePolicy(MessageEncodePolicy):
-    """Base 64 message encoding policy for binary messages.
-
-    Encodes binary messages to base 64. If the input content
-    is not bytes, a TypeError will be raised.
-    """
-
-    def encode(self, content):
-        if not isinstance(content, six.binary_type):
-            raise TypeError("Message content must be bytes for base 64 encoding.")
-        return b64encode(content).decode('utf-8')
-
-
-class BinaryBase64DecodePolicy(MessageDecodePolicy):
-    """Message decoding policy for base 64-encoded messages into bytes.
-
-    Decodes base64-encoded messages to bytes. If the input content
-    is not valid base 64, a DecodeError will be raised.
-    """
-
-    def decode(self, content, response):
-        try:
-            return b64decode(content.encode('utf-8'))
-        except (ValueError, TypeError) as error:
-            # ValueError for Python 3, TypeError for Python 2
-            raise DecodeError(
-                message="Message content is not valid base 64.",
-                response=response,
-                error=error)
-
-
-class NoEncodePolicy(MessageEncodePolicy):
-    """Bypass any message content encoding."""
-
-    def encode(self, content):
-        if isinstance(content, six.binary_type) and sys.version_info > (3,):
-            raise TypeError(
-                "Message content must not be bytes. Use the BinaryBase64EncodePolicy to send bytes."
-            )
-        return content
-
-
-class NoDecodePolicy(MessageDecodePolicy):
-    """Bypass any message content decoding."""
-
-    def decode(self, content, response):
-        return content
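
Because the Base64 policies above only touch base64 when no encryption key is configured, a standalone round trip works; the import path assumes the file as it existed before this removal:

    from azure.multiapi.storagev2.queue.v2019_07_07._message_encoding import (
        TextBase64EncodePolicy, TextBase64DecodePolicy)

    encoder = TextBase64EncodePolicy()   # key_encryption_key stays None
    encoded = encoder(u"hello queue")    # -> 'aGVsbG8gcXVldWU='
    decoder = TextBase64DecodePolicy()
    assert decoder.decode(encoded, None) == u"hello queue"
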
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_models.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,435 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called
-
-from typing import List # pylint: disable=unused-import
-from azure.core.paging import PageIterator
-from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
-from ._shared.models import DictMixin
-from ._generated.models import StorageErrorException
-from ._generated.models import AccessPolicy as GenAccessPolicy
-from ._generated.models import Logging as GeneratedLogging
-from ._generated.models import Metrics as GeneratedMetrics
-from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
-from ._generated.models import CorsRule as GeneratedCorsRule
-
-
-class QueueAnalyticsLogging(GeneratedLogging):
-    """Azure Analytics Logging settings.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :keyword str version: Required. The version of Storage Analytics to configure.
-    :keyword bool delete: Required. Indicates whether all delete requests should be logged.
-    :keyword bool read: Required. Indicates whether all read requests should be logged.
-    :keyword bool write: Required. Indicates whether all write requests should be logged.
-    :keyword ~azure.storage.queue.RetentionPolicy retention_policy: Required.
-        The retention policy for the log data.
-    """
-
-    def __init__(self, **kwargs):
-        self.version = kwargs.get('version', u'1.0')
-        self.delete = kwargs.get('delete', False)
-        self.read = kwargs.get('read', False)
-        self.write = kwargs.get('write', False)
-        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            version=generated.version,
-            delete=generated.delete,
-            read=generated.read,
-            write=generated.write,
-            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
-        )
-
-
-class Metrics(GeneratedMetrics):
-    """A summary of request statistics grouped by API in hour or minute aggregates.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :keyword str version: The version of Storage Analytics to configure.
-    :keyword bool enabled: Required. Indicates whether metrics are enabled for the service.
-    :keyword bool include_apis: Indicates whether metrics should generate summary
-        statistics for called API operations.
-    :keyword ~azure.storage.queue.RetentionPolicy retention_policy: Required.
-        The retention policy for the metrics.
-    """
-
-    def __init__(self, **kwargs):
-        self.version = kwargs.get('version', u'1.0')
-        self.enabled = kwargs.get('enabled', False)
-        self.include_apis = kwargs.get('include_apis')
-        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            version=generated.version,
-            enabled=generated.enabled,
-            include_apis=generated.include_apis,
-            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
-        )
-
-
-class RetentionPolicy(GeneratedRetentionPolicy):
-    """The retention policy which determines how long the associated data should
-    persist.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param bool enabled: Required. Indicates whether a retention policy is enabled
-        for the storage service.
-    :param int days: Indicates the number of days that metrics or logging or
-        soft-deleted data should be retained. All data older than this value will
-        be deleted.
-    """
-
-    def __init__(self, enabled=False, days=None):
-        self.enabled = enabled
-        self.days = days
-        if self.enabled and (self.days is None):
-            raise ValueError("If policy is enabled, 'days' must be specified.")
-
-    @classmethod
-    def _from_generated(cls, generated):
-        if not generated:
-            return cls()
-        return cls(
-            enabled=generated.enabled,
-            days=generated.days,
-        )
-
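
A short sketch composing the three settings models defined above, as they would feed a service set-properties call; the service-client wiring is assumed and not shown in this file:

    week = RetentionPolicy(enabled=True, days=7)
    logging = QueueAnalyticsLogging(read=True, write=True, delete=True,
                                    retention_policy=week)
    minute_metrics = Metrics(enabled=True, include_apis=True,
                             retention_policy=week)
    # RetentionPolicy enforces its own invariant:
    # RetentionPolicy(enabled=True) raises ValueError since 'days' is missing.
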
-
-class CorsRule(GeneratedCorsRule):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param list(str) allowed_origins:
-        A list of origin domains that will be allowed via CORS, or "*" to allow
-        all domains. The list must contain at least one entry. Limited to 64
-        origin domains. Each allowed origin can have up to 256 characters.
-    :param list(str) allowed_methods:
-        A list of HTTP methods that are allowed to be executed by the origin.
-        The list must contain at least one entry. For Azure Storage,
-        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
-    :keyword int max_age_in_seconds:
-        The number of seconds that the client/browser should cache a
-        pre-flight response.
-    :keyword list(str) exposed_headers:
-        Defaults to an empty list. A list of response headers to expose to CORS
-        clients. Limited to 64 defined headers and two prefixed headers. Each
-        header can be up to 256 characters.
-    :keyword list(str) allowed_headers:
-        Defaults to an empty list. A list of headers allowed to be part of
-        the cross-origin request. Limited to 64 defined headers and 2 prefixed
-        headers. Each header can be up to 256 characters.
-    """
-
-    def __init__(self, allowed_origins, allowed_methods, **kwargs):
-        self.allowed_origins = ','.join(allowed_origins)
-        self.allowed_methods = ','.join(allowed_methods)
-        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
-        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
-        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
-
-    @classmethod
-    def _from_generated(cls, generated):
-        return cls(
-            [generated.allowed_origins],
-            [generated.allowed_methods],
-            allowed_headers=[generated.allowed_headers],
-            exposed_headers=[generated.exposed_headers],
-            max_age_in_seconds=generated.max_age_in_seconds,
-        )
-
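
CorsRule flattens its list arguments into comma-joined strings in __init__ above; a tiny illustration:

    rule = CorsRule(["https://contoso.com"], ["GET", "PUT"],
                    allowed_headers=["x-ms-meta-*"], max_age_in_seconds=3600)
    assert rule.allowed_origins == "https://contoso.com"
    assert rule.allowed_methods == "GET,PUT"
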
-
-class AccessPolicy(GenAccessPolicy):
-    """Access Policy class used by the set and get access policy methods.
-
-    A stored access policy can specify the start time, expiry time, and
-    permissions for the Shared Access Signatures with which it's associated.
-    Depending on how you want to control access to your resource, you can
-    specify all of these parameters within the stored access policy, and omit
-    them from the URL for the Shared Access Signature. Doing so permits you to
-    modify the associated signature's behavior at any time, as well as to revoke
-    it. Or you can specify one or more of the access policy parameters within
-    the stored access policy, and the others on the URL. Finally, you can
-    specify all of the parameters on the URL. In this case, you can use the
-    stored access policy to revoke the signature, but not to modify its behavior.
-
-    Together the Shared Access Signature and the stored access policy must
-    include all fields required to authenticate the signature. If any required
-    fields are missing, the request will fail. Likewise, if a field is specified
-    both in the Shared Access Signature URL and in the stored access policy, the
-    request will fail with status code 400 (Bad Request).
-
-    :param str permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    """
-
-    def __init__(self, permission=None, expiry=None, start=None):
-        self.start = start
-        self.expiry = expiry
-        self.permission = permission
-
-
-class QueueMessage(DictMixin):
-    """Represents a queue message.
-
-    :ivar str id:
-        A GUID value assigned to the message by the Queue service that
-        identifies the message in the queue. This value may be used together
-        with the value of pop_receipt to delete a message from the queue after
-        it has been retrieved with the receive messages operation.
-    :ivar date inserted_on:
-        A UTC date value representing the time the message was inserted.
-    :ivar date expires_on:
-        A UTC date value representing the time the message expires.
-    :ivar int dequeue_count:
-        Begins with a value of 1 the first time the message is received. This
-        value is incremented each time the message is subsequently received.
-    :param obj content:
-        The message content. Type is determined by the decode_function set on
-        the service. Default is str.
-    :ivar str pop_receipt:
-        A receipt str which can be used together with the message_id element to
-        delete a message from the queue after it has been retrieved with the receive
-        messages operation. Only returned by receive messages operations. Set to
-        None for peek messages.
-    :ivar date next_visible_on:
-        A UTC date value representing the time the message will next be visible.
-        Only returned by receive messages operations. Set to None for peek messages.
-    """
-
-    def __init__(self, content=None):
-        self.id = None
-        self.inserted_on = None
-        self.expires_on = None
-        self.dequeue_count = None
-        self.content = content
-        self.pop_receipt = None
-        self.next_visible_on = None
-
-    @classmethod
-    def _from_generated(cls, generated):
-        message = cls(content=generated.message_text)
-        message.id = generated.message_id
-        message.inserted_on = generated.insertion_time
-        message.expires_on = generated.expiration_time
-        message.dequeue_count = generated.dequeue_count
-        if hasattr(generated, 'pop_receipt'):
-            message.pop_receipt = generated.pop_receipt
-            message.next_visible_on = generated.time_next_visible
-        return message
-
-
-class MessagesPaged(PageIterator):
-    """An iterable of Queue Messages.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param int results_per_page: The maximum number of messages to retrieve per
-        call.
-    """
-    def __init__(self, command, results_per_page=None, continuation_token=None):
-        if continuation_token is not None:
-            raise ValueError("This operation does not support continuation token")
-
-        super(MessagesPaged, self).__init__(
-            self._get_next_cb,
-            self._extract_data_cb,
-        )
-        self._command = command
-        self.results_per_page = results_per_page
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(number_of_messages=self.results_per_page)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, messages): # pylint: disable=no-self-use
-        # There is no concept of a continuation token, so raise StopIteration once no messages are returned
-        if not messages:
-            raise StopIteration("End of paging")
-        return "TOKEN_IGNORED", [QueueMessage._from_generated(q) for q in messages]  # pylint: disable=protected-access
-
-
-class QueueProperties(DictMixin):
-    """Queue Properties.
-
-    :ivar str name: The name of the queue.
-    :keyword dict(str,str) metadata:
-        A dict containing name-value pairs associated with the queue as metadata.
-        This var is set to None unless the include=metadata param was included
-        for the list queues operation. If this parameter was specified but the
-        queue has no metadata, metadata will be set to an empty dictionary.
-    """
-
-    def __init__(self, **kwargs):
-        self.name = None
-        self.metadata = kwargs.get('metadata')
-        self.approximate_message_count = kwargs.get('x-ms-approximate-messages-count')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        props = cls()
-        props.name = generated.name
-        props.metadata = generated.metadata
-        return props
-
-
-class QueuePropertiesPaged(PageIterator):
-    """An iterable of Queue properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A queue name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str next_marker: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only queues whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of queue names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(QueuePropertiesPaged, self).__init__(
-            self._get_next_cb,
-            self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-
-    def _get_next_cb(self, continuation_token):
-        try:
-            return self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        props_list = [QueueProperties._from_generated(q) for q in self._response.queue_items] # pylint: disable=protected-access
-        return self._response.next_marker or None, props_list
-
-
-class QueueSasPermissions(object):
-    """QueueSasPermissions class to be used with the
-    :func:`~azure.storage.queue.generate_queue_sas` function and for the AccessPolicies used with
-    :func:`~azure.storage.queue.QueueClient.set_queue_access_policy`.
-
-    :param bool read:
-        Read metadata and properties, including message count. Peek at messages.
-    :param bool add:
-        Add messages to the queue.
-    :param bool update:
-        Update messages in the queue. Note: Use the Process permission with
-        Update so you can first get the message you want to update.
-    :param bool process:
-        Get and delete messages from the queue.
-    """
-    def __init__(self, read=False, add=False, update=False, process=False):
-        self.read = read
-        self.add = add
-        self.update = update
-        self.process = process
-        self._str = (('r' if self.read else '') +
-                     ('a' if self.add else '') +
-                     ('u' if self.update else '') +
-                     ('p' if self.process else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a QueueSasPermissions from a string.
-
-        To specify read, add, update, or process permissions you need only to
-        include the first letter of the word in the string. E.g. For read and
-        update permissions, you would provide a string "ru".
-
-        :param str permission: The string which dictates the
-            read, add, update, or process permissions.
-        :return: A QueueSasPermissions object
-        :rtype: ~azure.storage.queue.QueueSasPermissions
-        """
-        p_read = 'r' in permission
-        p_add = 'a' in permission
-        p_update = 'u' in permission
-        p_process = 'p' in permission
-
-        parsed = cls(p_read, p_add, p_update, p_process)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-
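A sketch of from_string in use, again assuming the public azure.storage.queue equivalent; the account name, queue name, and key below are hypothetical:

from datetime import datetime, timedelta, timezone
from azure.storage.queue import QueueSasPermissions, generate_queue_sas

perms = QueueSasPermissions.from_string("rp")  # read + process

# generate_queue_sas returns the SAS token (query string) for the queue.
sas_token = generate_queue_sas(
    account_name="myaccount",
    queue_name="myqueue",
    account_key="<account-key>",
    permission=perms,
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)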
-def service_stats_deserialize(generated):
-    """Deserialize a ServiceStats objects into a dict.
-    """
-    return {
-        'geo_replication': {
-            'status': generated.geo_replication.status,
-            'last_sync_time': generated.geo_replication.last_sync_time,
-        }
-    }
-
-
-def service_properties_deserialize(generated):
-    """Deserialize a ServiceProperties objects into a dict.
-    """
-    return {
-        'analytics_logging': QueueAnalyticsLogging._from_generated(generated.logging),  # pylint: disable=protected-access
-        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
-        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
-        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
-    }
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,773 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
-    TYPE_CHECKING)
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-from azure.core.paging import ItemPaged
-from azure.core.tracing.decorator import distributed_trace
-from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
-from ._shared.request_handlers import add_metadata_headers, serialize_iso
-from ._shared.response_handlers import (
-    process_storage_error,
-    return_response_headers,
-    return_headers_and_deserialized)
-from ._message_encoding import NoEncodePolicy, NoDecodePolicy
-from ._deserialize import deserialize_queue_properties, deserialize_queue_creation
-from ._generated import AzureQueueStorage, VERSION
-from ._generated.models import StorageErrorException, SignedIdentifier
-from ._generated.models import QueueMessage as GenQueueMessage
-from ._models import QueueMessage, AccessPolicy, MessagesPaged
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.pipeline.policies import HTTPPolicy
-    from ._models import QueueProperties
-
-
-class QueueClient(StorageAccountHostsMixin):
-    """A client to interact with a specific Queue.
-
-    :param str account_url:
-        The URL to the storage account. In order to create a client given the full URI to the queue,
-        use the :func:`from_queue_url` classmethod.
-    :param queue_name: The name of the queue.
-    :type queue_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword encode_policy: The encoding policy to use on outgoing messages.
-        Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`,
-        :class:`BinaryBase64EncodePolicy` or `None`.
-    :keyword decode_policy: The decoding policy to use on incoming messages.
-        Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`,
-        :class:`BinaryBase64DecodePolicy` or `None`.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/queue_samples_message.py
-            :start-after: [START create_queue_client]
-            :end-before: [END create_queue_client]
-            :language: python
-            :dedent: 12
-            :caption: Create the queue client with url and credential.
-    """
-    def __init__(
-            self, account_url,  # type: str
-            queue_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not queue_name:
-            raise ValueError("Please specify a queue name.")
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(parsed_url))
-
-        _, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError("You need to provide either a SAS token or an account shared key to authenticate.")
-
-        self.queue_name = queue_name
-        self._query_str, credential = self._format_query_string(sas_token, credential)
-        super(QueueClient, self).__init__(parsed_url, service='queue', credential=credential, **kwargs)
-
-        self._config.message_encode_policy = kwargs.get('message_encode_policy', None) or NoEncodePolicy()
-        self._config.message_decode_policy = kwargs.get('message_decode_policy', None) or NoDecodePolicy()
-        self._client = AzureQueueStorage(self.url, pipeline=self._pipeline)
-        self._client._config.version = kwargs.get('api_version', VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        queue_name = self.queue_name
-        if isinstance(queue_name, six.text_type):
-            queue_name = queue_name.encode('UTF-8')
-        return "{}://{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(queue_name),
-            self._query_str)
-
-    @classmethod
-    def from_queue_url(cls, queue_url, credential=None, **kwargs):
-        # type: (str, Optional[Any], Any) -> QueueClient
-        """A client to interact with a specific Queue.
-
-        :param str queue_url: The full URI to the queue, including SAS token if used.
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string, an account
-            shared access key, or an instance of a TokenCredentials class from azure.identity.
-        :returns: A queue client.
-        :rtype: ~azure.storage.queue.QueueClient
-        """
-        try:
-            if not queue_url.lower().startswith('http'):
-                queue_url = "https://" + queue_url
-        except AttributeError:
-            raise ValueError("Queue URL must be a string.")
-        parsed_url = urlparse(queue_url.rstrip('/'))
-
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(queue_url))
-
-        queue_path = parsed_url.path.lstrip('/').split('/')
-        account_path = ""
-        if len(queue_path) > 1:
-            account_path = "/" + "/".join(queue_path[:-1])
-        account_url = "{}://{}{}?{}".format(
-            parsed_url.scheme,
-            parsed_url.netloc.rstrip('/'),
-            account_path,
-            parsed_url.query)
-        queue_name = unquote(queue_path[-1])
-        if not queue_name:
-            raise ValueError("Invalid URL. Please provide a URL with a valid queue name")
-        return cls(account_url, queue_name=queue_name, credential=credential, **kwargs)
-
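A sketch of from_queue_url, with a placeholder URL and SAS token (public azure.storage.queue spelling):

from azure.storage.queue import QueueClient

# The queue name is parsed from the last path segment; the SAS token in
# the query string doubles as the credential.
client = QueueClient.from_queue_url(
    "https://myaccount.queue.core.windows.net/myqueue?<sas-token>"
)
print(client.queue_name)  # "myqueue"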
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            queue_name,  # type: str
-            credential=None,  # type: Any
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        """Create QueueClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param queue_name: The queue name.
-        :type queue_name: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-        :returns: A queue client.
-        :rtype: ~azure.storage.queue.QueueClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START create_queue_client_from_connection_string]
-                :end-before: [END create_queue_client_from_connection_string]
-                :language: python
-                :dedent: 8
-                :caption: Create the queue client from connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(
-            conn_str, credential, 'queue')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(account_url, queue_name=queue_name, credential=credential, **kwargs) # type: ignore
-
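And the connection-string variant; the connection string below is a placeholder, and its embedded account key serves as the credential:

from azure.storage.queue import QueueClient

conn_str = (
    "DefaultEndpointsProtocol=https;AccountName=myaccount;"
    "AccountKey=<account-key>;EndpointSuffix=core.windows.net"
)
client = QueueClient.from_connection_string(conn_str, queue_name="myqueue")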
-    @distributed_trace
-    def create_queue(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Creates a new queue in the storage account.
-
-        If a queue with the same name already exists, the operation fails with
-        a `ResourceExistsError`.
-
-        :keyword dict(str,str) metadata:
-            A dict containing name-value pairs to associate with the queue as
-            metadata. Note that metadata names preserve the case with which they
-            were created, but are case-insensitive when set or read.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises: StorageErrorException
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_hello_world.py
-                :start-after: [START create_queue]
-                :end-before: [END create_queue]
-                :language: python
-                :dedent: 8
-                :caption: Create a queue.
-        """
-        headers = kwargs.pop('headers', {})
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return self._client.queue.create( # type: ignore
-                metadata=metadata,
-                timeout=timeout,
-                headers=headers,
-                cls=deserialize_queue_creation,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def delete_queue(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion
-        and is no longer accessible to clients. The queue is later removed from
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete.
-        If an operation is attempted against the queue while it is being deleted,
-        an :class:`HttpResponseError` will be thrown.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_hello_world.py
-                :start-after: [START delete_queue]
-                :end-before: [END delete_queue]
-                :language: python
-                :dedent: 12
-                :caption: Delete a queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.queue.delete(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
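A create/delete round trip as a sketch (placeholder connection string; ResourceExistsError is the documented failure mode for duplicate names):

from azure.core.exceptions import ResourceExistsError
from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
try:
    client.create_queue(metadata={"purpose": "demo"})
except ResourceExistsError:
    pass  # the queue already exists; safe to continue
# ... use the queue ...
client.delete_queue()  # server-side removal may take ~40 seconds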
-    @distributed_trace
-    def get_queue_properties(self, **kwargs):
-        # type: (Optional[Any]) -> QueueProperties
-        """Returns all user-defined metadata for the specified queue.
-
-        The data returned does not include the queue's list of messages.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: User-defined metadata for the queue.
-        :rtype: ~azure.storage.queue.QueueProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START get_queue_properties]
-                :end-before: [END get_queue_properties]
-                :language: python
-                :dedent: 12
-                :caption: Get the properties on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = self._client.queue.get_properties(
-                timeout=timeout,
-                cls=deserialize_queue_properties,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        response.name = self.queue_name
-        return response # type: ignore
-
-    @distributed_trace
-    def set_queue_metadata(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, Any]], Optional[Any]) -> None
-        """Sets user-defined metadata on the specified queue.
-
-        Metadata is associated with the queue as name-value pairs.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START set_queue_metadata]
-                :end-before: [END set_queue_metadata]
-                :language: python
-                :dedent: 12
-                :caption: Set metadata on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop('headers', {})
-        headers.update(add_metadata_headers(metadata)) # type: ignore
-        try:
-            return self._client.queue.set_metadata( # type: ignore
-                timeout=timeout,
-                headers=headers,
-                cls=return_response_headers,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
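A metadata round trip under the same placeholder setup; note that set_queue_metadata replaces any existing metadata wholesale:

from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
client.set_queue_metadata({"team": "storage", "env": "test"})
props = client.get_queue_properties()
print(props.name, props.metadata)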
-    @distributed_trace
-    def get_queue_access_policy(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict(str, ~azure.storage.queue.AccessPolicy)
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            _, identifiers = self._client.queue.get_access_policy(
-                timeout=timeout,
-                cls=return_headers_and_deserialized,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {s.id: s.access_policy or AccessPolicy() for s in identifiers}
-
-    @distributed_trace
-    def set_queue_access_policy(self, signed_identifiers, **kwargs):
-        # type: (Dict[str, AccessPolicy], Optional[Any]) -> None
-        """Sets stored access policies for the queue that may be used with Shared
-        Access Signatures.
-
-        When you set permissions for a queue, the existing permissions are replaced.
-        To update the queue's permissions, call :func:`~get_queue_access_policy` to fetch
-        all access policies associated with the queue, modify the access policy
-        that you wish to change, and then call this function with the complete
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to
-        30 seconds to take effect. During this interval, a shared access signature
-        that is associated with the stored access policy will throw an
-        :class:`HttpResponseError` until the access policy becomes active.
-
-        :param signed_identifiers:
-            SignedIdentifier access policies to associate with the queue.
-            This may contain up to 5 elements. An empty dict
-            will clear the access policies set on the service.
-        :type signed_identifiers: dict(str, ~azure.storage.queue.AccessPolicy)
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START set_access_policy]
-                :end-before: [END set_access_policy]
-                :language: python
-                :dedent: 12
-                :caption: Set an access policy on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        if len(signed_identifiers) > 5:
-            raise ValueError(
-                'Too many access policies provided. The server does not support setting '
-                'more than 5 access policies on a single resource.')
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value))
-        signed_identifiers = identifiers # type: ignore
-        try:
-            self._client.queue.set_access_policy(
-                queue_acl=signed_identifiers or None,
-                timeout=timeout,
-                **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
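A sketch of setting a stored access policy (placeholder client as before); the server accepts at most 5 policies per queue, and an empty dict clears them:

from datetime import datetime, timedelta, timezone
from azure.storage.queue import AccessPolicy, QueueClient, QueueSasPermissions

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
policy = AccessPolicy(
    permission=QueueSasPermissions(read=True, process=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)
client.set_queue_access_policy({"worker-policy": policy})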
-    @distributed_trace
-    def send_message( # type: ignore
-            self, content, # type: Any
-            **kwargs  # type: Optional[Any]
-        ):
-        # type: (...) -> QueueMessage
-        """Adds a new message to the back of the message queue.
-
-        The visibility timeout specifies the time that the message will be
-        invisible. After the timeout expires, the message will become visible.
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the
-        queue. The message will be deleted from the queue when the time-to-live
-        period expires.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function
-            set on the service. Default is str. The encoded message can be up to
-            64KB in size.
-        :keyword int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :keyword int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The time-to-live may be any positive number or -1 for infinity. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.QueueMessage` object.
-            This object is also populated with the content although it is not
-            returned from the service.
-        :rtype: ~azure.storage.queue.QueueMessage
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START send_messages]
-                :end-before: [END send_messages]
-                :language: python
-                :dedent: 12
-                :caption: Send messages.
-        """
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        time_to_live = kwargs.pop('time_to_live', None)
-        timeout = kwargs.pop('timeout', None)
-        self._config.message_encode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function)
-        content = self._config.message_encode_policy(content)
-        new_message = GenQueueMessage(message_text=content)
-
-        try:
-            enqueued = self._client.messages.enqueue(
-                queue_message=new_message,
-                visibilitytimeout=visibility_timeout,
-                message_time_to_live=time_to_live,
-                timeout=timeout,
-                **kwargs)
-            queue_message = QueueMessage(content=new_message.message_text)
-            queue_message.id = enqueued[0].message_id
-            queue_message.inserted_on = enqueued[0].insertion_time
-            queue_message.expires_on = enqueued[0].expiration_time
-            queue_message.pop_receipt = enqueued[0].pop_receipt
-            queue_message.next_visible_on = enqueued[0].time_next_visible
-            return queue_message
-        except StorageErrorException as error:
-            process_storage_error(error)
-
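Sending a message, sketched with the placeholder client; the timeout values shown are arbitrary:

from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
# Hidden for 10 seconds after enqueue, deleted after an hour if unprocessed.
sent = client.send_message("hello, queue", visibility_timeout=10, time_to_live=3600)
print(sent.id, sent.pop_receipt)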
-    @distributed_trace
-    def receive_messages(self, **kwargs):
-        # type: (Optional[Any]) -> ItemPaged[QueueMessage]
-        """Removes one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message
-        content and a pop_receipt value, which is required to delete the message.
-        The message is not automatically deleted from the queue, but after it has
-        been retrieved, it is not visible to other clients for the time interval
-        specified by the visibility_timeout parameter.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :keyword int messages_per_page:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :keyword int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            Returns a message iterator of dict-like Message objects.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.queue.QueueMessage]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START receive_messages]
-                :end-before: [END receive_messages]
-                :language: python
-                :dedent: 12
-                :caption: Receive messages from the queue.
-        """
-        messages_per_page = kwargs.pop('messages_per_page', None)
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        timeout = kwargs.pop('timeout', None)
-        self._config.message_decode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function)
-        try:
-            command = functools.partial(
-                self._client.messages.dequeue,
-                visibilitytimeout=visibility_timeout,
-                timeout=timeout,
-                cls=self._config.message_decode_policy,
-                **kwargs
-            )
-            return ItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
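A typical receive-process-delete loop, sketched with the placeholder client:

from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
# Each page fetches up to 5 messages; each message stays invisible to other
# consumers for 30 seconds unless deleted first.
for msg in client.receive_messages(messages_per_page=5, visibility_timeout=30):
    print(msg.content, msg.dequeue_count)
    client.delete_message(msg)  # id and pop_receipt come from msg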
-    @distributed_trace
-    def update_message(self, message, pop_receipt=None, content=None, **kwargs):
-        # type: (Any, Optional[str], Optional[Any], Any) -> QueueMessage
-        """Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a
-        queue message. This functionality can be useful if you want a worker role
-        to "lease" a queue message. For example, if a worker role calls :func:`~receive_messages()`
-        and recognizes that it needs more time to process a message, it can
-        continually extend the message's invisibility until it is processed. If
-        the worker role were to fail during processing, eventually the message
-        would become visible again and another worker role could process it.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param message:
-            The message object or id identifying the message to update.
-        :type message: str or ~azure.storage.queue.QueueMessage
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~receive_messages` or :func:`~update_message` operation.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function
-            set on the service. Default is str.
-        :keyword int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.QueueMessage` object. For convenience,
-            this object is also populated with the content, although it is not returned by the service.
-        :rtype: ~azure.storage.queue.QueueMessage
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START update_message]
-                :end-before: [END update_message]
-                :language: python
-                :dedent: 12
-                :caption: Update a message.
-        """
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            message_id = message.id
-            message_text = content or message.content
-            receipt = pop_receipt or message.pop_receipt
-            inserted_on = message.inserted_on
-            expires_on = message.expires_on
-            dequeue_count = message.dequeue_count
-        except AttributeError:
-            message_id = message
-            message_text = content
-            receipt = pop_receipt
-            inserted_on = None
-            expires_on = None
-            dequeue_count = None
-
-        if receipt is None:
-            raise ValueError("pop_receipt must be present")
-        if message_text is not None:
-            self._config.message_encode_policy.configure(
-                self.require_encryption,
-                self.key_encryption_key,
-                self.key_resolver_function)
-            message_text = self._config.message_encode_policy(message_text)
-            updated = GenQueueMessage(message_text=message_text)
-        else:
-            updated = None # type: ignore
-        try:
-            response = self._client.message_id.update(
-                queue_message=updated,
-                visibilitytimeout=visibility_timeout or 0,
-                timeout=timeout,
-                pop_receipt=receipt,
-                cls=return_response_headers,
-                queue_message_id=message_id,
-                **kwargs)
-            new_message = QueueMessage(content=message_text)
-            new_message.id = message_id
-            new_message.inserted_on = inserted_on
-            new_message.expires_on = expires_on
-            new_message.dequeue_count = dequeue_count
-            new_message.pop_receipt = response['popreceipt']
-            new_message.next_visible_on = response['time_next_visible']
-            return new_message
-        except StorageErrorException as error:
-            process_storage_error(error)
-
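A lease-extension sketch with the placeholder client; the returned message carries the fresh pop_receipt needed for any later update or delete:

from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
msg = next(iter(client.receive_messages()))
# Rewrite the content and keep it invisible for another 60 seconds.
updated = client.update_message(msg, content="still working", visibility_timeout=60)
client.delete_message(updated)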
-    @distributed_trace
-    def peek_messages(self, max_messages=None, **kwargs):
-        # type: (Optional[int], Optional[Any]) -> List[QueueMessage]
-        """Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved
-        for the first time with a call to :func:`~receive_messages`, its dequeue_count property
-        is set to 1. If it is not deleted and is subsequently retrieved again, the
-        dequeue_count property is incremented. The client may use this value to
-        determine how many times a message has been retrieved. Note that a call
-        to peek_messages does not increment the value of dequeue_count, but returns
-        this value for the client to read.
-
-        If the key-encryption-key or resolver field is set on the local service object,
-        the messages will be decrypted before being returned.
-
-        :param int max_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A list of :class:`~azure.storage.queue.QueueMessage` objects. Note that
-            next_visible_on and pop_receipt will not be populated as peek does
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list(:class:`~azure.storage.queue.QueueMessage`)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START peek_message]
-                :end-before: [END peek_message]
-                :language: python
-                :dedent: 12
-                :caption: Peek messages.
-        """
-        timeout = kwargs.pop('timeout', None)
-        if max_messages and not 1 <= max_messages <= 32:
-            raise ValueError("Number of messages to peek should be between 1 and 32")
-        self._config.message_decode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function)
-        try:
-            messages = self._client.messages.peek(
-                number_of_messages=max_messages,
-                timeout=timeout,
-                cls=self._config.message_decode_policy,
-                **kwargs)
-            wrapped_messages = []
-            for peeked in messages:
-                wrapped_messages.append(QueueMessage._from_generated(peeked))  # pylint: disable=protected-access
-            return wrapped_messages
-        except StorageErrorException as error:
-            process_storage_error(error)
-
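Peeking, sketched with the placeholder client; peeked messages keep their visibility and carry no pop_receipt:

from azure.storage.queue import QueueClient

client = QueueClient.from_connection_string("<connection-string>", "myqueue")
for msg in client.peek_messages(max_messages=5):
    print(msg.content, msg.dequeue_count)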
-    @distributed_trace
-    def clear_messages(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Deletes all messages from the specified queue.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START clear_messages]
-                :end-before: [END clear_messages]
-                :language: python
-                :dedent: 12
-                :caption: Clears all messages.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            self._client.messages.clear(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def delete_message(self, message, pop_receipt=None, **kwargs):
-        # type: (Any, Optional[str], Any) -> None
-        """Deletes the specified message.
-
-        Normally after a client retrieves a message with the receive messages operation,
-        the client is expected to process and delete the message. To delete the
-        message, you must have the message object itself, or two items of data: id and pop_receipt.
-        The id is returned from the previous receive_messages operation. The
-        pop_receipt is returned from the most recent :func:`~receive_messages` or
-        :func:`~update_message` operation. In order for the delete_message operation
-        to succeed, the pop_receipt specified on the request must match the
-        pop_receipt returned from the :func:`~receive_messages` or :func:`~update_message`
-        operation.
-
-        :param message:
-            The message object or id identifying the message to delete.
-        :type message: str or ~azure.storage.queue.QueueMessage
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~receive_messages` or :func:`~update_message`.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message.py
-                :start-after: [START delete_message]
-                :end-before: [END delete_message]
-                :language: python
-                :dedent: 12
-                :caption: Delete a message.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            message_id = message.id
-            receipt = pop_receipt or message.pop_receipt
-        except AttributeError:
-            message_id = message
-            receipt = pop_receipt
-
-        if receipt is None:
-            raise ValueError("pop_receipt must be present")
-        try:
-            self._client.message_id.delete(
-                pop_receipt=receipt,
-                timeout=timeout,
-                queue_message_id=message_id,
-                **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,425 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List,
-    TYPE_CHECKING)
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse # type: ignore
-
-from azure.core.paging import ItemPaged
-from azure.core.pipeline import Pipeline
-from azure.core.tracing.decorator import distributed_trace
-from ._shared.models import LocationMode
-from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
-from ._shared.response_handlers import process_storage_error
-from ._generated import AzureQueueStorage, VERSION
-from ._generated.models import StorageServiceProperties, StorageErrorException
-
-from ._models import (
-    QueuePropertiesPaged,
-    service_stats_deserialize,
-    service_properties_deserialize,
-)
-
-from ._queue_client import QueueClient
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.configuration import Configuration
-    from azure.core.pipeline.policies import HTTPPolicy
-    from ._models import (
-        QueueProperties,
-        QueueAnalyticsLogging,
-        Metrics,
-        CorsRule,
-    )
-
-
-class QueueServiceClient(StorageAccountHostsMixin):
-    """A client to interact with the Queue Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete queues within the account.
-    For operations relating to a specific queue, a client for this entity
-    can be retrieved using the :func:`~get_queue_client` function.
-
-    :param str account_url:
-        The URL to the queue service endpoint. Any other entities included
-        in the URL path (e.g. queue) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/queue_samples_authentication.py
-            :start-after: [START create_queue_service_client]
-            :end-before: [END create_queue_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the QueueServiceClient with an account url and credential.
-
-        .. literalinclude:: ../samples/queue_samples_authentication.py
-            :start-after: [START create_queue_service_client_token]
-            :end-before: [END create_queue_service_client_token]
-            :language: python
-            :dedent: 8
-            :caption: Creating the QueueServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        try:
-            if not account_url.lower().startswith('http'):
-                account_url = "https://" + account_url
-        except AttributeError:
-            raise ValueError("Account URL must be a string.")
-        parsed_url = urlparse(account_url.rstrip('/'))
-        if not parsed_url.netloc:
-            raise ValueError("Invalid URL: {}".format(account_url))
-
-        _, sas_token = parse_query(parsed_url.query)
-        if not sas_token and not credential:
-            raise ValueError("You need to provide either a SAS token or an account shared key to authenticate.")
-        self._query_str, credential = self._format_query_string(sas_token, credential)
-        super(QueueServiceClient, self).__init__(parsed_url, service='queue', credential=credential, **kwargs)
-        self._client = AzureQueueStorage(self.url, pipeline=self._pipeline)
-        self._client._config.version = kwargs.get('api_version', VERSION)  # pylint: disable=protected-access
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):  # type: (...) -> QueueServiceClient
-        """Create QueueServiceClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-        :returns: A Queue service client.
-        :rtype: ~azure.storage.queue.QueueServiceClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_authentication.py
-                :start-after: [START auth_from_connection_string]
-                :end-before: [END auth_from_connection_string]
-                :language: python
-                :dedent: 8
-                :caption: Creating the QueueServiceClient with a connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(
-            conn_str, credential, 'queue')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(account_url, credential=credential, **kwargs)
-
-    @distributed_trace
-    def get_service_stats(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Retrieves statistics related to replication for the Queue service.
-
-        It is only available when read-access geo-redundant replication is enabled for
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durably
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create an account via the Azure portal, for
-        example, North Central US. The location to which your data is replicated
-        is the secondary location. The secondary location is automatically
-        determined based on the location of the primary; it is in a second data
-        center that resides in the same region as the primary location. Read-only
-        access is available from the secondary location, if read-access geo-redundant
-        replication is enabled for your storage account.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The queue service stats.
-        :rtype: Dict[str, Any]
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = self._client.service.get_statistics( # type: ignore
-                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
-            return service_stats_deserialize(stats)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
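The deserialized shape matches service_stats_deserialize above; a sketch with a placeholder connection string (requires read-access geo-redundant replication on the account):

from azure.storage.queue import QueueServiceClient

service = QueueServiceClient.from_connection_string("<connection-string>")
stats = service.get_service_stats()
print(stats["geo_replication"]["status"])          # e.g. "live"
print(stats["geo_replication"]["last_sync_time"])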
-    @distributed_trace
-    def get_service_properties(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Gets the properties of a storage account's Queue service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An object containing queue service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START get_queue_service_properties]
-                :end-before: [END get_queue_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Getting queue service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def set_service_properties( # type: ignore
-            self, analytics_logging=None,  # type: Optional[QueueAnalyticsLogging]
-            hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics.
-
-        If an element (e.g. analytics_logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param analytics_logging:
-            Groups the Azure Analytics Logging settings.
-        :type analytics_logging: ~azure.storage.queue.QueueAnalyticsLogging
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for queues.
-        :type hour_metrics: ~azure.storage.queue.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for queues.
-        :type minute_metrics: ~azure.storage.queue.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list(~azure.storage.queue.CorsRule)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START set_queue_service_properties]
-                :end-before: [END set_queue_service_properties]
-                :language: python
-                :dedent: 8
-                :caption: Setting queue service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        props = StorageServiceProperties(
-            logging=analytics_logging,
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors
-        )
-        try:
-            return self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def list_queues(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            **kwargs
-        ):
-        # type: (...) -> ItemPaged[QueueProperties]
-        """Returns a generator to list the queues under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all queues have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only queues whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :keyword int results_per_page:
-            The maximum number of queue names to retrieve per API
-            call. If the request does not specify a value, the server will return up to 5,000 items.
-        :keyword int timeout:
-            The server timeout, expressed in seconds. This function may make multiple
-            calls to the service, in which case the timeout value specified will be
-            applied to each individual call.
-        :returns: An iterable (auto-paging) of QueueProperties.
-        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.queue.QueueProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START qsc_list_queues]
-                :end-before: [END qsc_list_queues]
-                :language: python
-                :dedent: 12
-                :caption: List queues in the service.
-        """
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        include = ['metadata'] if include_metadata else None
-        command = functools.partial(
-            self._client.service.list_queues_segment,
-            prefix=name_starts_with,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return ItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=QueuePropertiesPaged
-        )
-
-    @distributed_trace
-    def create_queue(
-            self, name,  # type: str
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> QueueClient
-        """Creates a new queue under the specified account.
-
-        If a queue with the same name already exists, the operation fails.
-        Returns a client with which to interact with the newly created queue.
-
-        :param str name: The name of the queue to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            queue as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.queue.QueueClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START qsc_create_queue]
-                :end-before: [END qsc_create_queue]
-                :language: python
-                :dedent: 8
-                :caption: Create a queue in the service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        queue = self.get_queue_client(name)
-        kwargs.setdefault('merge_span', True)
-        queue.create_queue(
-            metadata=metadata, timeout=timeout, **kwargs)
-        return queue
-
-    @distributed_trace
-    def delete_queue(
-            self, queue,  # type: Union[QueueProperties, str]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion
-        and is no longer accessible to clients. The queue is later removed from
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete.
-        If an operation is attempted against the queue while it is being deleted,
-        an :class:`HttpResponseError` will be thrown.
-
-        :param queue:
-            The queue to delete. This can either be the name of the queue,
-            or an instance of QueueProperties.
-        :type queue: str or ~azure.storage.queue.QueueProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START qsc_delete_queue]
-                :end-before: [END qsc_delete_queue]
-                :language: python
-                :dedent: 12
-                :caption: Delete a queue in the service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        queue_client = self.get_queue_client(queue)
-        kwargs.setdefault('merge_span', True)
-        queue_client.delete_queue(timeout=timeout, **kwargs)
-
-    def get_queue_client(self, queue, **kwargs):
-        # type: (Union[QueueProperties, str], Optional[Any]) -> QueueClient
-        """Get a client to interact with the specified queue.
-
-        The queue need not already exist.
-
-        :param queue:
-            The queue. This can either be the name of the queue,
-            or an instance of QueueProperties.
-        :type queue: str or ~azure.storage.queue.QueueProperties
-        :returns: A :class:`~azure.storage.queue.QueueClient` object.
-        :rtype: ~azure.storage.queue.QueueClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service.py
-                :start-after: [START get_queue_client]
-                :end-before: [END get_queue_client]
-                :language: python
-                :dedent: 8
-                :caption: Get the queue client.
-        """
-        try:
-            queue_name = queue.name
-        except AttributeError:
-            queue_name = queue
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return QueueClient(
-            self.url, queue_name=queue_name, credential=self.credential,
-            key_resolver_function=self.key_resolver_function, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key, api_version=self.api_version, _pipeline=_pipeline,
-            _configuration=self._config, _location_mode=self._location_mode, _hosts=self._hosts, **kwargs)
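For context, a minimal usage sketch of the QueueServiceClient surface removed above; the account URL, key, and queue name are placeholders, and the import path assumes the multiapi layout of this package:

    from azure.multiapi.storagev2.queue.v2019_07_07 import QueueServiceClient

    service = QueueServiceClient(
        "https://myaccount.queue.core.windows.net", credential="<account-key>")

    queue = service.create_queue("tasks", metadata={"Category": "test"})
    for props in service.list_queues(name_starts_with="task", include_metadata=True):
        print(props.name)
    service.delete_queue("tasks")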
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,56 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import hmac
-
-try:
-    from urllib.parse import quote, unquote
-except ImportError:
-    from urllib2 import quote, unquote # type: ignore
-
-import six
-
-
-def url_quote(url):
-    return quote(url)
-
-
-def url_unquote(url):
-    return unquote(url)
-
-
-def encode_base64(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def decode_base64_to_bytes(data):
-    if isinstance(data, six.text_type):
-        data = data.encode('utf-8')
-    return base64.b64decode(data)
-
-
-def decode_base64_to_text(data):
-    decoded_bytes = decode_base64_to_bytes(data)
-    return decoded_bytes.decode('utf-8')
-
-
-def sign_string(key, string_to_sign, key_is_base64=True):
-    if key_is_base64:
-        key = decode_base64_to_bytes(key)
-    else:
-        if isinstance(key, six.text_type):
-            key = key.encode('utf-8')
-    if isinstance(string_to_sign, six.text_type):
-        string_to_sign = string_to_sign.encode('utf-8')
-    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
-    digest = signed_hmac_sha256.digest()
-    encoded_digest = encode_base64(digest)
-    return encoded_digest
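The signing helpers above reduce to the following stdlib-only equivalent (a sketch; the key value is illustrative, not a real account key):

    import base64
    import hashlib
    import hmac

    def sign_string(key_b64, string_to_sign):
        # HMAC-SHA256 over the UTF-8 string, keyed by the base64-decoded
        # account key, with the digest returned as base64 text.
        key = base64.b64decode(key_b64)
        digest = hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return base64.b64encode(digest).decode("utf-8")

    demo_key = base64.b64encode(b"0" * 32).decode("utf-8")  # stand-in key
    print(sign_string(demo_key, "GET\n"))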
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,136 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import logging
-import sys
-
-try:
-    from urllib.parse import urlparse, unquote
-except ImportError:
-    from urlparse import urlparse # type: ignore
-    from urllib2 import unquote # type: ignore
-
-try:
-    from yarl import URL
-except ImportError:
-    pass
-
-try:
-    from azure.core.pipeline.transport import AioHttpTransport
-except ImportError:
-    AioHttpTransport = None
-
-from azure.core.exceptions import ClientAuthenticationError
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-
-from . import sign_string
-
-
-logger = logging.getLogger(__name__)
-
-
-
-# wraps a given exception with the desired exception type
-def _wrap_exception(ex, desired_type):
-    msg = ""
-    if ex.args:
-        msg = ex.args[0]
-    if sys.version_info >= (3,):
-        # Automatic chaining in Python 3 means we keep the trace
-        return desired_type(msg)
-    # There isn't a good solution in 2 for keeping the stack trace
-    # in general, or that will not result in an error in 3
-    # However, we can keep the previous error type and message
-    # TODO: In the future we will log the trace
-    return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
-
-
-class AzureSigningError(ClientAuthenticationError):
-    """
-    Represents a fatal error when attempting to sign a request.
-    In general, the cause of this exception is user error. For example, the given account key is not valid.
-    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
-    """
-
-
-# pylint: disable=no-self-use
-class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
-
-    def __init__(self, account_name, account_key):
-        self.account_name = account_name
-        self.account_key = account_key
-        super(SharedKeyCredentialPolicy, self).__init__()
-
-    def _get_headers(self, request, headers_to_sign):
-        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
-        if 'content-length' in headers and headers['content-length'] == '0':
-            del headers['content-length']
-        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
-
-    def _get_verb(self, request):
-        return request.http_request.method + '\n'
-
-    def _get_canonicalized_resource(self, request):
-        uri_path = urlparse(request.http_request.url).path
-        try:
-            if isinstance(request.context.transport, AioHttpTransport) or \
-                isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
-                uri_path = URL(uri_path)
-                return '/' + self.account_name + str(uri_path)
-        except TypeError:
-            pass
-        return '/' + self.account_name + uri_path
-
-    def _get_canonicalized_headers(self, request):
-        string_to_sign = ''
-        x_ms_headers = []
-        for name, value in request.http_request.headers.items():
-            if name.startswith('x-ms-'):
-                x_ms_headers.append((name.lower(), value))
-        x_ms_headers.sort()
-        for name, value in x_ms_headers:
-            if value is not None:
-                string_to_sign += ''.join([name, ':', value, '\n'])
-        return string_to_sign
-
-    def _get_canonicalized_resource_query(self, request):
-        sorted_queries = [(name, value) for name, value in request.http_request.query.items()]
-        sorted_queries.sort()
-
-        string_to_sign = ''
-        for name, value in sorted_queries:
-            if value is not None:
-                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
-
-        return string_to_sign
-
-    def _add_authorization_header(self, request, string_to_sign):
-        try:
-            signature = sign_string(self.account_key, string_to_sign)
-            auth_string = 'SharedKey ' + self.account_name + ':' + signature
-            request.http_request.headers['Authorization'] = auth_string
-        except Exception as ex:
-            # Wrap any error that occurred as signing error
-            # Doing so will clarify/locate the source of problem
-            raise _wrap_exception(ex, AzureSigningError)
-
-    def on_request(self, request):
-        string_to_sign = \
-            self._get_verb(request) + \
-            self._get_headers(
-                request,
-                [
-                    'content-encoding', 'content-language', 'content-length',
-                    'content-md5', 'content-type', 'date', 'if-modified-since',
-                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
-                ]
-            ) + \
-            self._get_canonicalized_headers(request) + \
-            self._get_canonicalized_resource(request) + \
-            self._get_canonicalized_resource_query(request)
-
-        self._add_authorization_header(request, string_to_sign)
-        #logger.debug("String_to_sign=%s", string_to_sign)
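For reference, the string-to-sign assembled by on_request above has this shape for a hypothetical GET of https://myaccount.queue.core.windows.net/tasks with only x-ms-date and x-ms-version set (values are illustrative):

    string_to_sign = (
        "GET\n"      # verb
        + "\n" * 11  # the eleven standard headers listed above, all empty here
        + "x-ms-date:Mon, 06 Jan 2020 00:00:00 GMT\n"  # sorted x-ms-* headers
        + "x-ms-version:2019-07-07\n"
        + "/myaccount/tasks"                           # canonicalized resource
    )                                                  # no query parameters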
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,427 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union,
-    Optional,
-    Any,
-    Iterable,
-    Dict,
-    List,
-    Type,
-    Tuple,
-    TYPE_CHECKING,
-)
-import logging
-
-try:
-    from urllib.parse import parse_qs, quote
-except ImportError:
-    from urlparse import parse_qs  # type: ignore
-    from urllib2 import quote  # type: ignore
-
-import six
-
-from azure.core.configuration import Configuration
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline import Pipeline
-from azure.core.pipeline.transport import RequestsTransport, HttpTransport
-from azure.core.pipeline.policies import (
-    RedirectPolicy,
-    ContentDecodePolicy,
-    BearerTokenCredentialPolicy,
-    ProxyPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-
-from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .models import LocationMode
-from .authentication import SharedKeyCredentialPolicy
-from .shared_access_signature import QueryStringConstants
-from .policies import (
-    StorageHeadersPolicy,
-    StorageUserAgentPolicy,
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageResponseHook,
-    StorageLoggingPolicy,
-    StorageHosts,
-    QueueMessagePolicy,
-    ExponentialRetry,
-)
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-
-_LOGGER = logging.getLogger(__name__)
-_SERVICE_PARAMS = {
-    "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-    "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"},
-    "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"},
-    "dfs": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
-}
-
-
-class StorageAccountHostsMixin(object):  # pylint: disable=too-many-instance-attributes
-    def __init__(
-        self,
-        parsed_url,  # type: Any
-        service,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
-        self._hosts = kwargs.get("_hosts")
-        self.scheme = parsed_url.scheme
-
-        if service not in ["blob", "queue", "file-share", "dfs"]:
-            raise ValueError("Invalid service: {}".format(service))
-        service_name = service.split('-')[0]
-        account = parsed_url.netloc.split(".{}.core.".format(service_name))
-        self.account_name = account[0] if len(account) > 1 else None
-        secondary_hostname = None
-
-        self.credential = format_shared_key_credential(account, credential)
-        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
-            raise ValueError("Token credential is only supported with HTTPS.")
-        if hasattr(self.credential, "account_name"):
-            self.account_name = self.credential.account_name
-            secondary_hostname = "{}-secondary.{}.{}".format(
-                self.credential.account_name, service_name, SERVICE_HOST_BASE)
-
-        if not self._hosts:
-            if len(account) > 1:
-                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
-            if kwargs.get("secondary_hostname"):
-                secondary_hostname = kwargs["secondary_hostname"]
-            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
-            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
-
-        self.require_encryption = kwargs.get("require_encryption", False)
-        self.key_encryption_key = kwargs.get("key_encryption_key")
-        self.key_resolver_function = kwargs.get("key_resolver_function")
-        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
-
-    def __enter__(self):
-        self._client.__enter__()
-        return self
-
-    def __exit__(self, *args):
-        self._client.__exit__(*args)
-
-    def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        self._client.close()
-
-    @property
-    def url(self):
-        """The full endpoint URL to this entity, including SAS token if used.
-
-        This could be either the primary endpoint,
-        or the secondary endpoint depending on the current :func:`location_mode`.
-        """
-        return self._format_url(self._hosts[self._location_mode])
-
-    @property
-    def primary_endpoint(self):
-        """The full primary endpoint URL.
-
-        :type: str
-        """
-        return self._format_url(self._hosts[LocationMode.PRIMARY])
-
-    @property
-    def primary_hostname(self):
-        """The hostname of the primary endpoint.
-
-        :type: str
-        """
-        return self._hosts[LocationMode.PRIMARY]
-
-    @property
-    def secondary_endpoint(self):
-        """The full secondary endpoint URL if configured.
-
-        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str
-        :raise ValueError:
-        """
-        if not self._hosts[LocationMode.SECONDARY]:
-            raise ValueError("No secondary host configured.")
-        return self._format_url(self._hosts[LocationMode.SECONDARY])
-
-    @property
-    def secondary_hostname(self):
-        """The hostname of the secondary endpoint.
-
-        If not available this will be None. To explicitly specify a secondary hostname, use the optional
-        `secondary_hostname` keyword argument on instantiation.
-
-        :type: str or None
-        """
-        return self._hosts[LocationMode.SECONDARY]
-
-    @property
-    def location_mode(self):
-        """The location mode that the client is currently using.
-
-        By default this will be "primary". Options include "primary" and "secondary".
-
-        :type: str
-        """
-
-        return self._location_mode
-
-    @location_mode.setter
-    def location_mode(self, value):
-        if self._hosts.get(value):
-            self._location_mode = value
-            self._client._config.url = self.url  # pylint: disable=protected-access
-        else:
-            raise ValueError("No host URL for location mode: {}".format(value))
-
-    @property
-    def api_version(self):
-        """The version of the Storage API used for requests.
-
-        :type: str
-        """
-        return self._client._config.version  # pylint: disable=protected-access
-
-    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
-        query_str = "?"
-        if snapshot:
-            query_str += "snapshot={}&".format(self.snapshot)
-        if share_snapshot:
-            query_str += "sharesnapshot={}&".format(self.snapshot)
-        if sas_token and not credential:
-            query_str += sas_token
-        elif is_credential_sastoken(credential):
-            query_str += credential.lstrip("?")
-            credential = None
-        return query_str.rstrip("?&"), credential
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, "get_token"):
-            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-
-        config = kwargs.get("_configuration") or create_configuration(**kwargs)
-        if kwargs.get("_pipeline"):
-            return config, kwargs["_pipeline"]
-        config.transport = kwargs.get("transport")  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            config.transport = RequestsTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            RedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs),
-            config.retry_policy,
-            config.logging_policy,
-            StorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs)
-        ]
-        return config, Pipeline(config.transport, policies=policies)
-
-    def _batch_send(
-        self, *reqs,  # type: HttpRequest
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts()
-            if raise_on_any_failure:
-                parts = list(response.parts())
-                if any(p for p in parts if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts
-                    )
-                    raise error
-                return iter(parts)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-class TransportWrapper(HttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, transport):
-        self._transport = transport
-
-    def send(self, request, **kwargs):
-        return self._transport.send(request, **kwargs)
-
-    def open(self):
-        pass
-
-    def close(self):
-        pass
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, *args):  # pylint: disable=arguments-differ
-        pass
-
-
-def format_shared_key_credential(account, credential):
-    if isinstance(credential, six.string_types):
-        if len(account) < 2:
-            raise ValueError("Unable to determine account name for shared key credential.")
-        credential = {"account_name": account[0], "account_key": credential}
-    if isinstance(credential, dict):
-        if "account_name" not in credential:
-            raise ValueError("Shared key credential missing 'account_name")
-        if "account_key" not in credential:
-            raise ValueError("Shared key credential missing 'account_key")
-        return SharedKeyCredentialPolicy(**credential)
-    return credential
-
-
-def parse_connection_str(conn_str, credential, service):
-    conn_str = conn_str.rstrip(";")
-    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
-    if any(len(tup) != 2 for tup in conn_settings):
-        raise ValueError("Connection string is either blank or malformed.")
-    conn_settings = dict(conn_settings)
-    endpoints = _SERVICE_PARAMS[service]
-    primary = None
-    secondary = None
-    if not credential:
-        try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
-        except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
-    if endpoints["primary"] in conn_settings:
-        primary = conn_settings[endpoints["primary"]]
-        if endpoints["secondary"] in conn_settings:
-            secondary = conn_settings[endpoints["secondary"]]
-    else:
-        if endpoints["secondary"] in conn_settings:
-            raise ValueError("Connection string specifies only secondary endpoint.")
-        try:
-            primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
-                service,
-                conn_settings["EndpointSuffix"],
-            )
-            secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
-            )
-        except KeyError:
-            pass
-
-    if not primary:
-        try:
-            primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
-            )
-        except KeyError:
-            raise ValueError("Connection string missing required connection details.")
-    return primary, secondary, credential
-
-
-def create_configuration(**kwargs):
-    # type: (**Any) -> Configuration
-    config = Configuration(**kwargs)
-    config.headers_policy = StorageHeadersPolicy(**kwargs)
-    config.user_agent_policy = StorageUserAgentPolicy(**kwargs)
-    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-    config.logging_policy = StorageLoggingPolicy(**kwargs)
-    config.proxy_policy = ProxyPolicy(**kwargs)
-
-    # Storage settings
-    config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
-    config.copy_polling_interval = 15
-
-    # Block blob uploads
-    config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
-    config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
-    config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
-
-    # Page blob uploads
-    config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
-
-    # Blob downloads
-    config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
-    config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
-
-    # File uploads
-    config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
-    return config
-
-
-def parse_query(query_str):
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
-    sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
-    sas_token = None
-    if sas_params:
-        sas_token = "&".join(sas_params)
-
-    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
-    return snapshot, sas_token
-
-
-def is_credential_sastoken(credential):
-    if not credential or not isinstance(credential, six.string_types):
-        return False
-
-    sas_values = QueryStringConstants.to_list()
-    parsed_query = parse_qs(credential.lstrip("?"))
-    if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
-        return True
-    return False
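A sketch of what parse_connection_str above resolves for a typical connection string (the account name and key are placeholders):

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<base64-key>;EndpointSuffix=core.windows.net")
    primary, secondary, credential = parse_connection_str(conn_str, None, "queue")
    # primary    == "https://myaccount.queue.core.windows.net"
    # secondary  == "myaccount-secondary.queue.core.windows.net"
    # credential == {"account_name": "myaccount", "account_key": "<base64-key>"}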
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,176 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-from azure.core.pipeline import AsyncPipeline
-from azure.core.async_paging import AsyncList
-from azure.core.exceptions import HttpResponseError
-from azure.core.pipeline.policies import (
-    ContentDecodePolicy,
-    AsyncBearerTokenCredentialPolicy,
-    AsyncRedirectPolicy,
-    DistributedTracingPolicy,
-    HttpLoggingPolicy,
-)
-from azure.core.pipeline.transport import AsyncHttpTransport
-
-from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
-from .authentication import SharedKeyCredentialPolicy
-from .base_client import create_configuration
-from .policies import (
-    StorageContentValidation,
-    StorageRequestHook,
-    StorageHosts,
-    StorageHeadersPolicy,
-    QueueMessagePolicy
-)
-from .policies_async import AsyncStorageResponseHook
-
-from .._generated.models import StorageErrorException
-from .response_handlers import process_storage_error, PartialBatchErrorException
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import Pipeline
-    from azure.core.pipeline.transport import HttpRequest
-    from azure.core.configuration import Configuration
-_LOGGER = logging.getLogger(__name__)
-
-
-class AsyncStorageAccountHostsMixin(object):
-
-    def __enter__(self):
-        raise TypeError("Async client only supports 'async with'.")
-
-    def __exit__(self, *args):
-        pass
-
-    async def __aenter__(self):
-        await self._client.__aenter__()
-        return self
-
-    async def __aexit__(self, *args):
-        await self._client.__aexit__(*args)
-
-    async def close(self):
-        """ This method is to close the sockets opened by the client.
-        It need not be used when using with a context manager.
-        """
-        await self._client.close()
-
-    def _create_pipeline(self, credential, **kwargs):
-        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
-        self._credential_policy = None
-        if hasattr(credential, 'get_token'):
-            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
-        elif isinstance(credential, SharedKeyCredentialPolicy):
-            self._credential_policy = credential
-        elif credential is not None:
-            raise TypeError("Unsupported credential: {}".format(credential))
-        config = kwargs.get('_configuration') or create_configuration(**kwargs)
-        if kwargs.get('_pipeline'):
-            return config, kwargs['_pipeline']
-        config.transport = kwargs.get('transport')  # type: ignore
-        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
-        kwargs.setdefault("read_timeout", READ_TIMEOUT)
-        if not config.transport:
-            try:
-                from azure.core.pipeline.transport import AioHttpTransport
-            except ImportError:
-                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
-            config.transport = AioHttpTransport(**kwargs)
-        policies = [
-            QueueMessagePolicy(),
-            config.headers_policy,
-            config.proxy_policy,
-            config.user_agent_policy,
-            StorageContentValidation(),
-            StorageRequestHook(**kwargs),
-            self._credential_policy,
-            ContentDecodePolicy(response_encoding="utf-8"),
-            AsyncRedirectPolicy(**kwargs),
-            StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
-            config.retry_policy,
-            config.logging_policy,
-            AsyncStorageResponseHook(**kwargs),
-            DistributedTracingPolicy(**kwargs),
-            HttpLoggingPolicy(**kwargs),
-        ]
-        return config, AsyncPipeline(config.transport, policies=policies)
-
-    async def _batch_send(
-        self, *reqs: 'HttpRequest',
-        **kwargs
-    ):
-        """Given a series of request, do a Storage batch call.
-        """
-        # Pop it here, so requests doesn't feel bad about additional kwarg
-        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
-        request = self._client._client.post(  # pylint: disable=protected-access
-            url='https://{}/?comp=batch'.format(self.primary_hostname),
-            headers={
-                'x-ms-version': self.api_version
-            }
-        )
-
-        request.set_multipart_mixed(
-            *reqs,
-            policies=[
-                StorageHeadersPolicy(),
-                self._credential_policy
-            ]
-        )
-
-        pipeline_response = await self._pipeline.run(
-            request, **kwargs
-        )
-        response = pipeline_response.http_response
-
-        try:
-            if response.status_code not in [202]:
-                raise HttpResponseError(response=response)
-            parts = response.parts() # Return an AsyncIterator
-            if raise_on_any_failure:
-                parts_list = []
-                async for part in parts:
-                    parts_list.append(part)
-                if any(p for p in parts_list if not 200 <= p.status_code < 300):
-                    error = PartialBatchErrorException(
-                        message="There is a partial failure in the batch operation.",
-                        response=response, parts=parts_list
-                    )
-                    raise error
-                return AsyncList(parts_list)
-            return parts
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-
-class AsyncTransportWrapper(AsyncHttpTransport):
-    """Wrapper class that ensures that an inner client created
-    by a `get_client` method does not close the outer transport for the parent
-    when used in a context manager.
-    """
-    def __init__(self, async_transport):
-        self._transport = async_transport
-
-    async def send(self, request, **kwargs):
-        return await self._transport.send(request, **kwargs)
-
-    async def open(self):
-        pass
-
-    async def close(self):
-        pass
-
-    async def __aenter__(self):
-        pass
-
-    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
-        pass
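The async mixin above deliberately rejects plain 'with'; a minimal sketch of the intended lifecycle follows (the aio import path and URL are assumptions in the style of azure.storage.queue.aio):

    import asyncio
    from azure.multiapi.storagev2.queue.v2019_07_07.aio import QueueServiceClient

    async def main():
        async with QueueServiceClient(
                "https://myaccount.queue.core.windows.net",
                credential="<account-key>") as service:
            async for queue in service.list_queues():
                print(queue.name)

    asyncio.run(main())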
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-from .._generated.version import VERSION
-
-
-X_MS_VERSION = VERSION
-
-# Socket timeout in seconds
-CONNECTION_TIMEOUT = 20
-READ_TIMEOUT = 20
-
-# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
-# The socket timeout is now the maximum total duration to send all data.
-if sys.version_info >= (3, 5):
-    # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
-    # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
-    READ_TIMEOUT = 2000
-
-STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
-
-SERVICE_HOST_BASE = 'core.windows.net'
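As a quick check of the read-timeout comment above: 100 MB at the assumed 50 KB/s floor is (100 * 1024) KB / 50 KB/s = 2048 s, which the code rounds down to READ_TIMEOUT = 2000.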
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,542 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import os
-from os import urandom
-from json import (
-    dumps,
-    loads,
-)
-from collections import OrderedDict
-
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher
-from cryptography.hazmat.primitives.ciphers.algorithms import AES
-from cryptography.hazmat.primitives.ciphers.modes import CBC
-from cryptography.hazmat.primitives.padding import PKCS7
-
-from azure.core.exceptions import HttpResponseError
-
-from .._version import VERSION
-from . import encode_base64, decode_base64_to_bytes
-
-
-_ENCRYPTION_PROTOCOL_V1 = '1.0'
-_ERROR_OBJECT_INVALID = \
-    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
-
-
-def _validate_not_none(param_name, param):
-    if param is None:
-        raise ValueError('{0} should not be None.'.format(param_name))
-
-
-def _validate_key_encryption_key_wrap(kek):
-    # Note that None is not callable and so will fail the second clause of each check.
-    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
-    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
-
-
-class _EncryptionAlgorithm(object):
-    '''
-    Specifies which client encryption algorithm is used.
-    '''
-    AES_CBC_256 = 'AES_CBC_256'
-
-
-class _WrappedContentKey:
-    '''
-    Represents the envelope key details stored on the service.
-    '''
-
-    def __init__(self, algorithm, encrypted_key, key_id):
-        '''
-        :param str algorithm:
-            The algorithm used for wrapping.
-        :param bytes encrypted_key:
-            The encrypted content-encryption-key.
-        :param str key_id:
-            The key-encryption-key identifier string.
-        '''
-
-        _validate_not_none('algorithm', algorithm)
-        _validate_not_none('encrypted_key', encrypted_key)
-        _validate_not_none('key_id', key_id)
-
-        self.algorithm = algorithm
-        self.encrypted_key = encrypted_key
-        self.key_id = key_id
-
-
-class _EncryptionAgent:
-    '''
-    Represents the encryption agent stored on the service.
-    It consists of the encryption protocol version and encryption algorithm used.
-    '''
-
-    def __init__(self, encryption_algorithm, protocol):
-        '''
-        :param _EncryptionAlgorithm encryption_algorithm:
-            The algorithm used for encrypting the message contents.
-        :param str protocol:
-            The protocol version used for encryption.
-        '''
-
-        _validate_not_none('encryption_algorithm', encryption_algorithm)
-        _validate_not_none('protocol', protocol)
-
-        self.encryption_algorithm = str(encryption_algorithm)
-        self.protocol = protocol
-
-
-class _EncryptionData:
-    '''
-    Represents the encryption data that is stored on the service.
-    '''
-
-    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
-                 key_wrapping_metadata):
-        '''
-        :param bytes content_encryption_IV:
-            The content encryption initialization vector.
-        :param _EncryptionAgent encryption_agent:
-            The encryption agent.
-        :param _WrappedContentKey wrapped_content_key:
-            An object that stores the wrapping algorithm, the key identifier,
-            and the encrypted key bytes.
-        :param dict key_wrapping_metadata:
-            A dict containing metadata related to the key wrapping.
-        '''
-
-        _validate_not_none('content_encryption_IV', content_encryption_IV)
-        _validate_not_none('encryption_agent', encryption_agent)
-        _validate_not_none('wrapped_content_key', wrapped_content_key)
-
-        self.content_encryption_IV = content_encryption_IV
-        self.encryption_agent = encryption_agent
-        self.wrapped_content_key = wrapped_content_key
-        self.key_wrapping_metadata = key_wrapping_metadata
-
-
-def _generate_encryption_data_dict(kek, cek, iv):
-    '''
-    Generates and returns the encryption metadata as a dict.
-
-    :param object kek: The key encryption key. See calling functions for more information.
-    :param bytes cek: The content encryption key.
-    :param bytes iv: The initialization vector.
-    :return: A dict containing all the encryption metadata.
-    :rtype: dict
-    '''
-    # Encrypt the cek.
-    wrapped_cek = kek.wrap_key(cek)
-
-    # Build the encryption_data dict.
-    # Use OrderedDict to comply with Java's ordering requirement.
-    wrapped_content_key = OrderedDict()
-    wrapped_content_key['KeyId'] = kek.get_kid()
-    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
-    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
-
-    encryption_agent = OrderedDict()
-    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
-    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
-
-    encryption_data_dict = OrderedDict()
-    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
-    encryption_data_dict['EncryptionAgent'] = encryption_agent
-    encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
-    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
-
-    return encryption_data_dict
-
-
-def _dict_to_encryption_data(encryption_data_dict):
-    '''
-    Converts the specified dictionary to an EncryptionData object for
-    eventual use in decryption.
-
-    :param dict encryption_data_dict:
-        The dictionary containing the encryption data.
-    :return: an _EncryptionData object built from the dictionary.
-    :rtype: _EncryptionData
-    '''
-    try:
-        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
-            raise ValueError("Unsupported encryption version.")
-    except KeyError:
-        raise ValueError("Unsupported encryption version.")
-    wrapped_content_key = encryption_data_dict['WrappedContentKey']
-    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
-                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
-                                             wrapped_content_key['KeyId'])
-
-    encryption_agent = encryption_data_dict['EncryptionAgent']
-    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
-                                        encryption_agent['Protocol'])
-
-    if 'KeyWrappingMetadata' in encryption_data_dict:
-        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
-    else:
-        key_wrapping_metadata = None
-
-    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
-                                      encryption_agent,
-                                      wrapped_content_key,
-                                      key_wrapping_metadata)
-
-    return encryption_data
-
-
-def _generate_AES_CBC_cipher(cek, iv):
-    '''
-    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
-
-    :param bytes[] cek: The content encryption key for the cipher.
-    :param bytes[] iv: The initialization vector for the cipher.
-    :return: A cipher for encrypting in AES256 CBC.
-    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
-    '''
-
-    backend = default_backend()
-    algorithm = AES(cek)
-    mode = CBC(iv)
-    return Cipher(algorithm, mode, backend)
-
-
-def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
-    '''
-    Extracts and returns the content_encryption_key stored in the encryption_data object
-    and performs necessary validation on all parameters.
-    :param _EncryptionData encryption_data:
-        The encryption metadata of the retrieved value.
-    :param obj key_encryption_key:
-        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
-        instance variables for more details.
-    :param func key_resolver:
-        A function that, given a key_id, will return a key_encryption_key. Please refer
-        to high-level service object instance variables for more details.
-    :return: the content_encryption_key stored in the encryption_data object.
-    :rtype: bytes[]
-    '''
-
-    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
-    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
-
-    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
-        raise ValueError('Encryption version is not supported.')
-
-    content_encryption_key = None
-
-    # If the resolver exists, give priority to the key it finds.
-    if key_resolver is not None:
-        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
-
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
-    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
-        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
-    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
-        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
-    # Will throw an exception if the specified algorithm is not supported.
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
-
-
-def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
-    Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
-
-
-def encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the upload_data_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
-                                                     initialization_vector)
-    encryption_data['EncryptionMode'] = 'FullBlob'
-
-    return dumps(encryption_data), encrypted_data
-
-
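A toy round trip through encrypt_blob above, using a stand-in key-encryption-key that satisfies the wrap_key/get_kid/get_key_wrap_algorithm interface checked by _validate_key_encryption_key_wrap. The XOR "wrap" is purely illustrative and is not a real key-wrap algorithm:

    import os

    class ToyKEK:
        def __init__(self, kek_bytes, kid):
            self._kek = kek_bytes
            self._kid = kid

        def wrap_key(self, key):
            # Illustrative only: XOR the 32-byte cek with the kek bytes.
            return bytes(a ^ b for a, b in zip(key, self._kek))

        def unwrap_key(self, wrapped, algorithm):
            return bytes(a ^ b for a, b in zip(wrapped, self._kek))

        def get_kid(self):
            return self._kid

        def get_key_wrap_algorithm(self):
            return "XOR-DEMO"

    kek = ToyKEK(os.urandom(32), "local:demo-kek")
    metadata_json, ciphertext = encrypt_blob(b"hello queue", kek)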
-def generate_blob_encryption_data(key_encryption_key):
-    '''
-    Generates the encryption_metadata for the blob.
-
-    :param object key_encryption_key:
-        The key-encryption-key used to wrap the cek associated with this blob.
-    :return: A tuple containing the cek and iv for this blob as well as the
-        serialized encryption metadata for the blob.
-    :rtype: (bytes, bytes, str)
-    '''
-    encryption_data = None
-    content_encryption_key = None
-    initialization_vector = None
-    if key_encryption_key:
-        _validate_key_encryption_key_wrap(key_encryption_key)
-        content_encryption_key = urandom(32)
-        initialization_vector = urandom(16)
-        encryption_data = _generate_encryption_data_dict(key_encryption_key,
-                                                         content_encryption_key,
-                                                         initialization_vector)
-        encryption_data['EncryptionMode'] = 'FullBlob'
-        encryption_data = dumps(encryption_data)
-
-    return content_encryption_key, initialization_vector, encryption_data
-
-
-def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
-                 content, start_offset, end_offset, response_headers):
-    '''
-    Decrypts the given blob contents and returns only the requested range.
-
-    :param bool require_encryption:
-        Whether or not the calling blob service requires objects to be decrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :param key_resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted blob content.
-    :rtype: bytes
-    '''
-    try:
-        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
-    except:  # pylint: disable=bare-except
-        if require_encryption:
-            raise ValueError(
-                'Encryption required, but received data does not contain appropriate metadata. ' + \
-                'Data was either not encrypted or metadata has been lost.')
-
-        return content
-
-    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    blob_type = response_headers['x-ms-blob-type']
-
-    iv = None
-    unpad = False
-    if 'content-range' in response_headers:
-        content_range = response_headers['content-range']
-        # Format: 'bytes x-y/size'
-
-        # Ignore the word 'bytes'
-        content_range = content_range.split(' ')
-
-        content_range = content_range[1].split('-')
-        content_range = content_range[1].split('/')
-        end_range = int(content_range[0])
-        blob_size = int(content_range[1])
-
-        if start_offset >= 16:
-            iv = content[:16]
-            content = content[16:]
-            start_offset -= 16
-        else:
-            iv = encryption_data.content_encryption_IV
-
-        if end_range == blob_size - 1:
-            unpad = True
-    else:
-        unpad = True
-        iv = encryption_data.content_encryption_IV
-
-    if blob_type == 'PageBlob':
-        unpad = False
-
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
-    decryptor = cipher.decryptor()
-
-    content = decryptor.update(content) + decryptor.finalize()
-    if unpad:
-        unpadder = PKCS7(128).unpadder()
-        content = unpadder.update(content) + unpadder.finalize()
-
-    return content[start_offset: len(content) - end_offset]
-
-
-def get_blob_encryptor_and_padder(cek, iv, should_pad):
-    encryptor = None
-    padder = None
-
-    if cek is not None and iv is not None:
-        cipher = _generate_AES_CBC_cipher(cek, iv)
-        encryptor = cipher.encryptor()
-        padder = PKCS7(128).padder() if should_pad else None
-
-    return encryptor, padder
-
-
-def encrypt_queue_message(message, key_encryption_key):
-    '''
-    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encrypted message and the encryption metadata.
-
-    :param object message:
-        The plain text message to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A json-formatted string containing the encrypted message and the encryption metadata.
-    :rtype: str
-    '''
-
-    _validate_not_none('message', message)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always uses 16 byte blocks
-    content_encryption_key = os.urandom(32)
-    initialization_vector = os.urandom(16)
-
-    # Queue encoding functions all return unicode strings, and encryption should
-    # operate on binary strings.
-    message = message.encode('utf-8')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(message) + padder.finalize()
-
-    # Encrypt the data.
-    encryptor = cipher.encryptor()
-    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
-
-    # Build the dictionary structure.
-    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
-                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
-                                                                      content_encryption_key,
-                                                                      initialization_vector)}
-
-    return dumps(queue_message)
-
-
-def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
-    '''
-    Returns the decrypted message contents from an EncryptedQueueMessage.
-    If no encryption metadata is present, will return the unaltered message.
-    :param str message:
-        The JSON formatted EncryptedQueueMessage contents with all associated metadata.
-    :param response:
-        The pipeline response object, used to raise HttpResponseError if decryption fails.
-    :param bool require_encryption:
-        If set, will enforce that the retrieved messages are encrypted and decrypt them.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The plain text message from the queue message.
-    :rtype: str
-    '''
-
-    try:
-        message = loads(message)
-
-        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
-        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
-    except (KeyError, ValueError):
-        # Message was not json formatted and so was not encrypted,
-        # or the user stored a json formatted message without encryption metadata.
-        if require_encryption:
-            raise ValueError('Message was not encrypted.')
-
-        return message
-    try:
-        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
-    except Exception as error:
-        raise HttpResponseError(
-            message="Decryption failed.",
-            response=response,
-            error=error)
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,425 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from enum import Enum
-
-
-def get_enum_value(value):
-    if value is None or value in ["None", ""]:
-        return None
-    try:
-        return value.value
-    except AttributeError:
-        return value
-
-
-class StorageErrorCode(str, Enum):
-
-    # Generic storage values
-    account_already_exists = "AccountAlreadyExists"
-    account_being_created = "AccountBeingCreated"
-    account_is_disabled = "AccountIsDisabled"
-    authentication_failed = "AuthenticationFailed"
-    authorization_failure = "AuthorizationFailure"
-    condition_headers_not_supported = "ConditionHeadersNotSupported"
-    condition_not_met = "ConditionNotMet"
-    empty_metadata_key = "EmptyMetadataKey"
-    insufficient_account_permissions = "InsufficientAccountPermissions"
-    internal_error = "InternalError"
-    invalid_authentication_info = "InvalidAuthenticationInfo"
-    invalid_header_value = "InvalidHeaderValue"
-    invalid_http_verb = "InvalidHttpVerb"
-    invalid_input = "InvalidInput"
-    invalid_md5 = "InvalidMd5"
-    invalid_metadata = "InvalidMetadata"
-    invalid_query_parameter_value = "InvalidQueryParameterValue"
-    invalid_range = "InvalidRange"
-    invalid_resource_name = "InvalidResourceName"
-    invalid_uri = "InvalidUri"
-    invalid_xml_document = "InvalidXmlDocument"
-    invalid_xml_node_value = "InvalidXmlNodeValue"
-    md5_mismatch = "Md5Mismatch"
-    metadata_too_large = "MetadataTooLarge"
-    missing_content_length_header = "MissingContentLengthHeader"
-    missing_required_query_parameter = "MissingRequiredQueryParameter"
-    missing_required_header = "MissingRequiredHeader"
-    missing_required_xml_node = "MissingRequiredXmlNode"
-    multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
-    operation_timed_out = "OperationTimedOut"
-    out_of_range_input = "OutOfRangeInput"
-    out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
-    request_body_too_large = "RequestBodyTooLarge"
-    resource_type_mismatch = "ResourceTypeMismatch"
-    request_url_failed_to_parse = "RequestUrlFailedToParse"
-    resource_already_exists = "ResourceAlreadyExists"
-    resource_not_found = "ResourceNotFound"
-    server_busy = "ServerBusy"
-    unsupported_header = "UnsupportedHeader"
-    unsupported_xml_node = "UnsupportedXmlNode"
-    unsupported_query_parameter = "UnsupportedQueryParameter"
-    unsupported_http_verb = "UnsupportedHttpVerb"
-
-    # Blob values
-    append_position_condition_not_met = "AppendPositionConditionNotMet"
-    blob_already_exists = "BlobAlreadyExists"
-    blob_not_found = "BlobNotFound"
-    blob_overwritten = "BlobOverwritten"
-    blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
-    block_count_exceeds_limit = "BlockCountExceedsLimit"
-    block_list_too_long = "BlockListTooLong"
-    cannot_change_to_lower_tier = "CannotChangeToLowerTier"
-    cannot_verify_copy_source = "CannotVerifyCopySource"
-    container_already_exists = "ContainerAlreadyExists"
-    container_being_deleted = "ContainerBeingDeleted"
-    container_disabled = "ContainerDisabled"
-    container_not_found = "ContainerNotFound"
-    content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
-    copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
-    copy_id_mismatch = "CopyIdMismatch"
-    feature_version_mismatch = "FeatureVersionMismatch"
-    incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
-    incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
-    incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
-    infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
-    invalid_blob_or_block = "InvalidBlobOrBlock"
-    invalid_blob_tier = "InvalidBlobTier"
-    invalid_blob_type = "InvalidBlobType"
-    invalid_block_id = "InvalidBlockId"
-    invalid_block_list = "InvalidBlockList"
-    invalid_operation = "InvalidOperation"
-    invalid_page_range = "InvalidPageRange"
-    invalid_source_blob_type = "InvalidSourceBlobType"
-    invalid_source_blob_url = "InvalidSourceBlobUrl"
-    invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
-    lease_already_present = "LeaseAlreadyPresent"
-    lease_already_broken = "LeaseAlreadyBroken"
-    lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
-    lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
-    lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
-    lease_id_missing = "LeaseIdMissing"
-    lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
-    lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
-    lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
-    lease_lost = "LeaseLost"
-    lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
-    lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
-    lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
-    max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
-    no_pending_copy_operation = "NoPendingCopyOperation"
-    operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
-    pending_copy_operation = "PendingCopyOperation"
-    previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
-    previous_snapshot_not_found = "PreviousSnapshotNotFound"
-    previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
-    sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
-    sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
-    snapshot_count_exceeded = "SnapshotCountExceeded"
-    snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
-    snapshots_present = "SnapshotsPresent"
-    source_condition_not_met = "SourceConditionNotMet"
-    system_in_use = "SystemInUse"
-    target_condition_not_met = "TargetConditionNotMet"
-    unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
-    blob_being_rehydrated = "BlobBeingRehydrated"
-    blob_archived = "BlobArchived"
-    blob_not_archived = "BlobNotArchived"
-
-    # Queue values
-    invalid_marker = "InvalidMarker"
-    message_not_found = "MessageNotFound"
-    message_too_large = "MessageTooLarge"
-    pop_receipt_mismatch = "PopReceiptMismatch"
-    queue_already_exists = "QueueAlreadyExists"
-    queue_being_deleted = "QueueBeingDeleted"
-    queue_disabled = "QueueDisabled"
-    queue_not_empty = "QueueNotEmpty"
-    queue_not_found = "QueueNotFound"
-
-    # File values
-    cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
-    client_cache_flush_delay = "ClientCacheFlushDelay"
-    delete_pending = "DeletePending"
-    directory_not_empty = "DirectoryNotEmpty"
-    file_lock_conflict = "FileLockConflict"
-    invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
-    parent_not_found = "ParentNotFound"
-    read_only_attribute = "ReadOnlyAttribute"
-    share_already_exists = "ShareAlreadyExists"
-    share_being_deleted = "ShareBeingDeleted"
-    share_disabled = "ShareDisabled"
-    share_not_found = "ShareNotFound"
-    sharing_violation = "SharingViolation"
-    share_snapshot_in_progress = "ShareSnapshotInProgress"
-    share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
-    share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
-    share_has_snapshots = "ShareHasSnapshots"
-    container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
-
-
-class DictMixin(object):
-
-    def __setitem__(self, key, item):
-        self.__dict__[key] = item
-
-    def __getitem__(self, key):
-        return self.__dict__[key]
-
-    def __repr__(self):
-        return str(self)
-
-    def __len__(self):
-        return len(self.keys())
-
-    def __delitem__(self, key):
-        self.__dict__[key] = None
-
-    def __eq__(self, other):
-        """Compare objects by comparing all attributes."""
-        if isinstance(other, self.__class__):
-            return self.__dict__ == other.__dict__
-        return False
-
-    def __ne__(self, other):
-        """Compare objects by comparing all attributes."""
-        return not self.__eq__(other)
-
-    def __str__(self):
-        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
-
-    def has_key(self, k):
-        return k in self.__dict__
-
-    def update(self, *args, **kwargs):
-        return self.__dict__.update(*args, **kwargs)
-
-    def keys(self):
-        return [k for k in self.__dict__ if not k.startswith('_')]
-
-    def values(self):
-        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def items(self):
-        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
-
-    def get(self, key, default=None):
-        if key in self.__dict__:
-            return self.__dict__[key]
-        return default
-
-
-class LocationMode(object):
-    """
-    Specifies the location the request should be sent to. This mode only applies
-    for RA-GRS accounts which allow secondary read access. All other account types
-    must use PRIMARY.
-    """
-
-    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
-    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
-
-
-class ResourceTypes(object):
-    """
-    Specifies the resource types that are accessible with the account SAS.
-
-    :param bool service:
-        Access to service-level APIs (e.g., Get/Set Service Properties,
-        Get Service Stats, List Containers/Queues/Shares)
-    :param bool container:
-        Access to container-level APIs (e.g., Create/Delete Container,
-        Create/Delete Queue, Create/Delete Share,
-        List Blobs/Files and Directories)
-    :param bool object:
-        Access to object-level APIs for blobs, queue messages, and
-        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
-    """
-
-    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
-        self.service = service
-        self.container = container
-        self.object = object
-        self._str = (('s' if self.service else '') +
-                ('c' if self.container else '') +
-                ('o' if self.object else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create a ResourceTypes from a string.
-
-        To specify service, container, or object you need only to
-        include the first letter of the word in the string. E.g. for service and
-        container, you would provide the string "sc".
-
-        :param str string: Specify service, container, or object in
-            the string with the first letter of the word.
-        :return: A ResourceTypes object
-        :rtype: ~azure.storage.queue.ResourceTypes
-        """
-        res_service = 's' in string
-        res_container = 'c' in string
-        res_object = 'o' in string
-
-        parsed = cls(res_service, res_container, res_object)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class AccountSasPermissions(object):
-    """
-    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
-    function and for the AccessPolicies used with set_*_acl. There are two types of
-    SAS which may be used to grant resource access. One is to grant access to a
-    specific resource (resource-specific). Another is to grant access to the
-    entire service for a specific account and allow certain operations based on
-    the permissions found here.
-
-    :param bool read:
-        Valid for all signed resource types (Service, Container, and Object).
-        Permits read permissions to the specified resource type.
-    :param bool write:
-        Valid for all signed resource types (Service, Container, and Object).
-        Permits write permissions to the specified resource type.
-    :param bool delete:
-        Valid for Container and Object resource types, except for queue messages.
-    :param bool list:
-        Valid for Service and Container resource types only.
-    :param bool add:
-        Valid for the following Object resource types only: queue messages, and append blobs.
-    :param bool create:
-        Valid for the following Object resource types only: blobs and files.
-        Users can create new blobs or files, but may not overwrite existing
-        blobs or files.
-    :param bool update:
-        Valid for the following Object resource types only: queue messages.
-    :param bool process:
-        Valid for the following Object resource type only: queue messages.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
-                 add=False, create=False, update=False, process=False):
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self.add = add
-        self.create = create
-        self.update = update
-        self.process = process
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else '') +
-                     ('a' if self.add else '') +
-                     ('c' if self.create else '') +
-                     ('u' if self.update else '') +
-                     ('p' if self.process else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create AccountSasPermissions from a string.
-
-        To specify read, write, delete, etc. permissions you need only to
-        include the first letter of the word in the string. E.g. for read and write
-        permissions you would provide a string "rw".
-
-        :param str permission: Specify permissions in
-            the string with the first letter of the word.
-        :return: An AccountSasPermissions object
-        :rtype: ~azure.storage.queue.AccountSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-        p_add = 'a' in permission
-        p_create = 'c' in permission
-        p_update = 'u' in permission
-        p_process = 'p' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process)
-        parsed._str = permission # pylint: disable = protected-access
-        return parsed
-
-class Services(object):
-    """Specifies the services accessible with the account SAS.
-
-    :param bool blob:
-        Access for the `~azure.storage.blob.BlobServiceClient`
-    :param bool queue:
-        Access for the `~azure.storage.queue.QueueServiceClient`
-    :param bool fileshare:
-        Access for the `~azure.storage.fileshare.ShareServiceClient`
-    """
-
-    def __init__(self, blob=False, queue=False, fileshare=False):
-        self.blob = blob
-        self.queue = queue
-        self.fileshare = fileshare
-        self._str = (('b' if self.blob else '') +
-                ('q' if self.queue else '') +
-                ('f' if self.fileshare else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, string):
-        """Create Services from a string.
-
-        To specify blob, queue, or file you need only to
-        include the first letter of the word in the string. E.g. for blob and queue
-        you would provide a string "bq".
-
-        :param str string: Specify blob, queue, or file in
-            the string with the first letter of the word.
-        :return: A Services object
-        :rtype: ~azure.storage.queue.Services
-        """
-        res_blob = 'b' in string
-        res_queue = 'q' in string
-        res_file = 'f' in string
-
-        parsed = cls(res_blob, res_queue, res_file)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
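All of the from_string parsers above share the same first-letter flag convention. A tiny self-contained sketch of that pattern (the helper name is hypothetical; the removed classes inline this logic one attribute at a time):

    def parse_flags(string, flag_letters):
        # Map each known letter to True when it appears in the input string.
        return {letter: letter in string for letter in flag_letters}

    # 'rl' grants read and list; every other permission stays False.
    perms = parse_flags('rl', 'rwdlacup')
    assert perms['r'] and perms['l'] and not perms['w']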
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
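For context, _to_utc_datetime rendered datetimes in the ISO-8601 'Z'-suffixed form that Storage SAS fields expect. A standalone equivalent (renamed here because the original was module-private):

    from datetime import datetime, timezone

    def to_utc_datetime(value):
        # Render a datetime as 'YYYY-MM-DDTHH:MM:SSZ'.
        return value.strftime('%Y-%m-%dT%H:%M:%SZ')

    print(to_utc_datetime(datetime(2019, 7, 7, 12, 0, 0, tzinfo=timezone.utc)))
    # -> 2019-07-07T12:00:00Z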
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,638 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-import types
-import platform
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode # type: ignore
-    from urlparse import ( # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .._version import VERSION
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):
-    """Is this method/status code retryable? (Based on whitelists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon on the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
-        if status in [501, 505]:
-            return False
-        return True
-    return False
-
-
-def urljoin(base_url, stub_url):
-    parsed = urlparse(base_url)
-    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
-    return parsed.geturl()
-
-
-class QueueMessagePolicy(SansIOHTTPPolicy):
-
-    def on_request(self, request):
-        message_id = request.context.options.pop('queue_message_id', None)
-        if message_id:
-            request.http_request.url = urljoin(
-                request.http_request.url,
-                message_id)
-
-
-class StorageHeadersPolicy(HeadersPolicy):
-    request_id_header_name = 'x-ms-client-request-id'
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        super(StorageHeadersPolicy, self).on_request(request)
-        current_time = format_date_time(time())
-        request.http_request.headers['x-ms-date'] = current_time
-
-        custom_id = request.context.options.pop('client_request_id', None)
-        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
-
-    # def on_response(self, request, response):
-    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
-    #     if self.request_id_header_name in response.http_response.headers:
-
-    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
-
-    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
-    #             raise AzureError(
-    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
-    #                 "Service request ID: {}".format(
-    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
-    #                     response.http_response.headers['x-ms-request-id']),
-    #                 response=response.http_response
-    #             )
-
-
-class StorageHosts(SansIOHTTPPolicy):
-
-    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
-        self.hosts = hosts
-        super(StorageHosts, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        request.context.options['hosts'] = self.hosts
-        parsed_url = urlparse(request.http_request.url)
-
-        # Detect what location mode we're currently requesting with
-        location_mode = LocationMode.PRIMARY
-        for key, value in self.hosts.items():
-            if parsed_url.netloc == value:
-                location_mode = key
-
-        # See if a specific location mode has been specified, and if so, redirect
-        use_location = request.context.options.pop('use_location', None)
-        if use_location:
-            # Lock retries to the specific location
-            request.context.options['retry_to_secondary'] = False
-            if use_location not in self.hosts:
-                raise ValueError("Attempting to use undefined host location {}".format(use_location))
-            if use_location != location_mode:
-                # Update request URL to use the specified location
-                updated = parsed_url._replace(netloc=self.hosts[use_location])
-                request.http_request.url = updated.geturl()
-                location_mode = use_location
-
-        request.context.options['location_mode'] = location_mode
-
-
-class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
-    """A policy that logs HTTP request and response to the DEBUG logger.
-
-    This accepts both global configuration and a per-request override via the "logging_enable" option.
-    """
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        http_request = request.http_request
-        options = request.context.options
-        if options.pop("logging_enable", self.enable_http_logger):
-            request.context["logging_enable"] = True
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                log_url = http_request.url
-                query_params = http_request.query
-                if 'sig' in query_params:
-                    log_url = log_url.replace(query_params['sig'], "sig=*****")
-                _LOGGER.debug("Request URL: %r", log_url)
-                _LOGGER.debug("Request method: %r", http_request.method)
-                _LOGGER.debug("Request headers:")
-                for header, value in http_request.headers.items():
-                    if header.lower() == 'authorization':
-                        value = '*****'
-                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
-                        # take the url apart and scrub away the signed signature
-                        scheme, netloc, path, params, query, fragment = urlparse(value)
-                        parsed_qs = dict(parse_qsl(query))
-                        parsed_qs['sig'] = '*****'
-
-                        # the SAS needs to be put back together
-                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
-
-                    _LOGGER.debug("    %r: %r", header, value)
-                _LOGGER.debug("Request body:")
-
-                # We don't want to log the binary data of a file upload.
-                if isinstance(http_request.body, types.GeneratorType):
-                    _LOGGER.debug("File upload")
-                else:
-                    _LOGGER.debug(str(http_request.body))
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log request: %r", err)
-
-    def on_response(self, request, response):
-        # type: (PipelineRequest, PipelineResponse, Any) -> None
-        if response.context.pop("logging_enable", self.enable_http_logger):
-            if not _LOGGER.isEnabledFor(logging.DEBUG):
-                return
-
-            try:
-                _LOGGER.debug("Response status: %r", response.http_response.status_code)
-                _LOGGER.debug("Response headers:")
-                for res_header, value in response.http_response.headers.items():
-                    _LOGGER.debug("    %r: %r", res_header, value)
-
-                # We don't want to log binary data if the response is a file.
-                _LOGGER.debug("Response content:")
-                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
-                header = response.http_response.headers.get('content-disposition')
-
-                if header and pattern.match(header):
-                    filename = header.partition('=')[2]
-                    _LOGGER.debug("File attachments: %s", filename)
-                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
-                    _LOGGER.debug("Body contains binary data.")
-                elif response.http_response.headers.get("content-type", "").startswith("image"):
-                    _LOGGER.debug("Body contains image data.")
-                else:
-                    if response.context.options.get('stream', False):
-                        _LOGGER.debug("Body is streamable")
-                    else:
-                        _LOGGER.debug(response.http_response.text())
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.debug("Failed to log response: %s", repr(err))
-
-
-class StorageUserAgentPolicy(SansIOHTTPPolicy):
-
-    _USERAGENT = "User-Agent"
-
-    def __init__(self, **kwargs):
-        self._application = kwargs.pop('user_agent', None)
-        storage_sdk = kwargs.pop('storage_sdk')
-        self._user_agent = "azsdk-python-storage-{}/{} Python/{} ({})".format(
-            storage_sdk,
-            VERSION,
-            platform.python_version(),
-            platform.platform())
-        super(StorageUserAgentPolicy, self).__init__()
-
-    def on_request(self, request):
-        existing = request.http_request.headers.get(self._USERAGENT, "")
-        app_string = request.context.options.pop('user_agent', None) or self._application
-        if app_string:
-            request.http_request.headers[self._USERAGENT] = "{} {}".format(
-                app_string, self._user_agent)
-        else:
-            request.http_request.headers[self._USERAGENT] = self._user_agent
-        if existing:
-            request.http_request.headers[self._USERAGENT] += " " + existing
-
-
-class StorageRequestHook(SansIOHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._request_callback = kwargs.get('raw_request_hook')
-        super(StorageRequestHook, self).__init__()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, **Any) -> PipelineResponse
-        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
-        if request_callback:
-            request_callback(request)
-
-
-class StorageResponseHook(HTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(StorageResponseHook, self).__init__()
-
-    def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = self.next.send(request)
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-
-class StorageContentValidation(SansIOHTTPPolicy):
-    """A simple policy that sends the given headers
-    with the request.
-
-    This will overwrite any headers already defined in the request.
-    """
-    header_name = 'Content-MD5'
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        super(StorageContentValidation, self).__init__()
-
-    @staticmethod
-    def get_content_md5(data):
-        md5 = hashlib.md5()
-        if isinstance(data, bytes):
-            md5.update(data)
-        elif hasattr(data, 'read'):
-            pos = 0
-            try:
-                pos = data.tell()
-            except:  # pylint: disable=bare-except
-                pass
-            for chunk in iter(lambda: data.read(4096), b""):
-                md5.update(chunk)
-            try:
-                data.seek(pos, SEEK_SET)
-            except (AttributeError, IOError):
-                raise ValueError("Data should be bytes or a seekable file-like object.")
-        else:
-            raise ValueError("Data should be bytes or a seekable file-like object.")
-
-        return md5.digest()
-
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
-        validate_content = request.context.options.pop('validate_content', False)
-        if validate_content and request.http_request.method != 'GET':
-            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
-            request.http_request.headers[self.header_name] = computed_md5
-            request.context['validate_content_md5'] = computed_md5
-        request.context['validate_content'] = validate_content
-
-    def on_response(self, request, response):
-        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
-            computed_md5 = request.context.get('validate_content_md5') or \
-                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
-            if response.http_response.headers['content-md5'] != computed_md5:
-                raise AzureError(
-                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
-                        response.http_response.headers['content-md5'], computed_md5),
-                    response=response.http_response
-                )
-
-
-class StorageRetryPolicy(HTTPPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    def __init__(self, **kwargs):
-        self.total_retries = kwargs.pop('retry_total', 10)
-        self.connect_retries = kwargs.pop('retry_connect', 3)
-        self.read_retries = kwargs.pop('retry_read', 3)
-        self.status_retries = kwargs.pop('retry_status', 3)
-        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
-        super(StorageRetryPolicy, self).__init__()
-
-    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
-        """
-        Sets the next host location on the request, if applicable.
-
-        :param dict settings:
-            The retry settings, containing the current location mode and the
-            available host locations.
-        :param request:
-            The request to evaluate and possibly modify.
-        """
-        if settings['hosts'] and all(settings['hosts'].values()):
-            url = urlparse(request.url)
-            # If there's more than one possible location, retry to the alternative
-            if settings['mode'] == LocationMode.PRIMARY:
-                settings['mode'] = LocationMode.SECONDARY
-            else:
-                settings['mode'] = LocationMode.PRIMARY
-            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
-            request.url = updated.geturl()
-
-    def configure_retries(self, request):  # pylint: disable=no-self-use
-        body_position = None
-        if hasattr(request.http_request.body, 'read'):
-            try:
-                body_position = request.http_request.body.tell()
-            except (AttributeError, UnsupportedOperation):
-                # if body position cannot be obtained, then retries will not work
-                pass
-        options = request.context.options
-        return {
-            'total': options.pop("retry_total", self.total_retries),
-            'connect': options.pop("retry_connect", self.connect_retries),
-            'read': options.pop("retry_read", self.read_retries),
-            'status': options.pop("retry_status", self.status_retries),
-            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
-            'mode': options.pop("location_mode", LocationMode.PRIMARY),
-            'hosts': options.pop("hosts", None),
-            'hook': options.pop("retry_hook", None),
-            'body_position': body_position,
-            'count': 0,
-            'history': []
-        }
-
-    def get_backoff_time(self, settings):  # pylint: disable=unused-argument,no-self-use
-        """ Formula for computing the current backoff.
-        Should be calculated by child class.
-
-        :rtype: float
-        """
-        return 0
-
-    def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        transport.sleep(backoff)
-
-    def increment(self, settings, request, response=None, error=None):
-        """Increment the retry counters.
-
-        :param response: A pipeline response object.
-        :param error: An error encountered during the request, or
-            None if the response was received successfully.
-
-        :return: Whether the retry attempts are exhausted.
-        """
-        settings['total'] -= 1
-
-        if error and isinstance(error, ServiceRequestError):
-            # Errors when we're fairly sure that the server did not receive the
-            # request, so it should be safe to retry.
-            settings['connect'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        elif error and isinstance(error, ServiceResponseError):
-            # Errors that occur after the request has been started, so we should
-            # assume that the server began processing it.
-            settings['read'] -= 1
-            settings['history'].append(RequestHistory(request, error=error))
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # status_forcelist, and the given method is in the whitelist.
-            if response:
-                settings['status'] -= 1
-                settings['history'].append(RequestHistory(request, http_response=response))
-
-        if not is_exhausted(settings):
-            if request.method not in ['PUT'] and settings['retry_secondary']:
-                self._set_next_host_location(settings, request)
-
-            # rewind the request body if it is a stream
-            if request.body and hasattr(request.body, 'read'):
-                # if no position was saved, the body cannot be rewound and the retry will not work
-                if settings['body_position'] is None:
-                    return False
-                try:
-                    # attempt to rewind the body to the initial position
-                    request.body.seek(settings['body_position'], SEEK_SET)
-                except (UnsupportedOperation, ValueError):
-                    # if body is not seekable, then retry would not work
-                    return False
-            settings['count'] += 1
-            return True
-        return False
-
-    def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(StorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are attempted after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(StorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize the back-off interval.
-            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A float indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
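The two retry flavors above differ only in how get_backoff_time derives the nominal delay before jitter is applied. A standalone sketch of the exponential variant (the function name is illustrative; the policy computes this inside get_backoff_time):

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        # First retry waits initial_backoff; later retries add increment_base**count.
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        low = backoff - jitter if backoff > jitter else 0
        return random.uniform(low, backoff + jitter)

    # e.g. the third retry: 15 + 3**2 = 24s nominal, jittered into [21, 27].
    print(exponential_backoff(2))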
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,219 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import asyncio
-import random
-import logging
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline.policies import AsyncHTTPPolicy
-from azure.core.exceptions import AzureError
-
-from .policies import is_retry, StorageRetryPolicy
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-async def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        if asyncio.iscoroutine(settings['hook']):
-            await settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-        else:
-            settings['hook'](
-                retry_count=settings['count'] - 1,
-                location_mode=settings['mode'],
-                **kwargs)
-
-
-class AsyncStorageResponseHook(AsyncHTTPPolicy):
-
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
-        self._response_callback = kwargs.get('raw_response_hook')
-        super(AsyncStorageResponseHook, self).__init__()
-
-    async def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
-        data_stream_total = request.context.get('data_stream_total') or \
-            request.context.options.pop('data_stream_total', None)
-        download_stream_current = request.context.get('download_stream_current') or \
-            request.context.options.pop('download_stream_current', None)
-        upload_stream_current = request.context.get('upload_stream_current') or \
-            request.context.options.pop('upload_stream_current', None)
-        response_callback = request.context.get('response_callback') or \
-            request.context.options.pop('raw_response_hook', self._response_callback)
-
-        response = await self.next.send(request)
-        await response.http_response.load_body()
-
-        will_retry = is_retry(response, request.context.options.get('mode'))
-        if not will_retry and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
-            if data_stream_total is None:
-                content_range = response.http_response.headers.get('Content-Range')
-                if content_range:
-                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
-                else:
-                    data_stream_total = download_stream_current
-        elif not will_retry and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
-        for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
-        if response_callback:
-            if asyncio.iscoroutinefunction(response_callback):
-                await response_callback(response)
-            else:
-                response_callback(response)
-            request.context['response_callback'] = response_callback
-        return response
-
-class AsyncStorageRetryPolicy(StorageRetryPolicy):
-    """
-    The base class for Exponential and Linear retries containing shared code.
-    """
-
-    async def sleep(self, settings, transport):
-        backoff = self.get_backoff_time(settings)
-        if not backoff or backoff < 0:
-            return
-        await transport.sleep(backoff)
-
-    async def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = await self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        await retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        await self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    await retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    await self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(AsyncStorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds. For example, by default the first retry
-        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
-        third after (15+3^2) = 24 seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, whose powers are added to initial_backoff
-            after the first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates the range to jitter/randomize the back-off interval by.
-            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A number of seconds indicating how long to wait before retrying the request,
-            or None to indicate that no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(AsyncStorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates the range to jitter/randomize the back-off interval by.
-            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            A number of seconds indicating how long to wait before retrying the request,
-            or None to indicate that no retry should be performed.
-        :rtype: float or None
-        """
-        random_generator = random.Random()
-        # The backoff interval normally does not change; however, it may have been
-        # modified by setting the attribute directly after the object was initialized.
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
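
The ExponentialRetry docstring above describes the schedule initial_backoff + increment_base^retry_count. A worked sketch of that arithmetic under the same defaults (the helper name is illustrative, not part of the SDK):

import random

def exponential_backoff(count, initial_backoff=15, increment_base=3, random_jitter_range=3):
    # The first retry waits initial_backoff seconds; each subsequent retry
    # adds increment_base raised to the retry count.
    backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
    range_start = backoff - random_jitter_range if backoff > random_jitter_range else 0
    return random.Random().uniform(range_start, backoff + random_jitter_range)

# Jitter-window centers for the first three retries: 15, 18 and 24 seconds,
# matching the (15), (15+3^1) and (15+3^2) examples in the docstring.
for count in range(3):
    print(exponential_backoff(count))
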
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,147 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-
-import logging
-from os import fstat
-from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
-
-import isodate
-
-from azure.core.exceptions import raise_with_traceback
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def serialize_iso(attr):
-    """Serialize Datetime object into ISO-8601 formatted string.
-
-    :param Datetime attr: Object to be serialized.
-    :rtype: str
-    :raises ValueError: if the format is invalid.
-    """
-    if not attr:
-        return None
-    if isinstance(attr, str):
-        attr = isodate.parse_datetime(attr)
-    try:
-        utc = attr.utctimetuple()
-        if utc.tm_year > 9999 or utc.tm_year < 1:
-            raise OverflowError("Hit max or min date")
-
-        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
-            utc.tm_year, utc.tm_mon, utc.tm_mday,
-            utc.tm_hour, utc.tm_min, utc.tm_sec)
-        return date + 'Z'
-    except (ValueError, OverflowError) as err:
-        msg = "Unable to serialize datetime object."
-        raise_with_traceback(ValueError, msg, err)
-    except AttributeError as err:
-        msg = "ISO-8601 object must be valid Datetime object."
-        raise_with_traceback(TypeError, msg, err)
-
-
-def get_length(data):
-    length = None
-    # Check if the object implements the __len__ method; this covers most input cases such as bytearray.
-    try:
-        length = len(data)
-    except:  # pylint: disable=bare-except
-        pass
-
-    if not length:
-        # Check if the stream is a file-like stream object.
-        # If so, calculate the size using the file descriptor.
-        try:
-            fileno = data.fileno()
-        except (AttributeError, UnsupportedOperation):
-            pass
-        else:
-            try:
-                return fstat(fileno).st_size
-            except OSError:
-                # Not a valid fileno; it is possible that 'requests' returned
-                # a socket number instead.
-                pass
-
-        # If the stream is seekable and tell() is implemented, calculate the stream size.
-        try:
-            current_position = data.tell()
-            data.seek(0, SEEK_END)
-            length = data.tell() - current_position
-            data.seek(current_position, SEEK_SET)
-        except (AttributeError, UnsupportedOperation):
-            pass
-
-    return length
-
-
-def read_length(data):
-    try:
-        if hasattr(data, 'read'):
-            read_data = b''
-            for chunk in iter(lambda: data.read(4096), b""):
-                read_data += chunk
-            return len(read_data), read_data
-        if hasattr(data, '__iter__'):
-            read_data = b''
-            for chunk in data:
-                read_data += chunk
-            return len(read_data), read_data
-    except:  # pylint: disable=bare-except
-        pass
-    raise ValueError("Unable to calculate content length, please specify.")
-
-
-def validate_and_format_range_headers(
-        start_range, end_range, start_range_required=True,
-        end_range_required=True, check_content_md5=False, align_to_page=False):
-    # If end range is provided, start range must be provided
-    if (start_range_required or end_range is not None) and start_range is None:
-        raise ValueError("start_range value cannot be None.")
-    if end_range_required and end_range is None:
-        raise ValueError("end_range value cannot be None.")
-
-    # Page ranges must be 512 aligned
-    if align_to_page:
-        if start_range is not None and start_range % 512 != 0:
-            raise ValueError("Invalid page blob start_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(start_range))
-        if end_range is not None and end_range % 512 != 511:
-            raise ValueError("Invalid page blob end_range: {0}. "
-                             "The size must be aligned to a 512-byte boundary.".format(end_range))
-
-    # Format based on whether end_range is present
-    range_header = None
-    if end_range is not None:
-        range_header = 'bytes={0}-{1}'.format(start_range, end_range)
-    elif start_range is not None:
-        range_header = "bytes={0}-".format(start_range)
-
-    # Content MD5 can only be provided for a complete range less than 4MB in size
-    range_validation = None
-    if check_content_md5:
-        if start_range is None or end_range is None:
-            raise ValueError("Both start and end range requied for MD5 content validation.")
-        if end_range - start_range > 4 * 1024 * 1024:
-            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
-        range_validation = 'true'
-
-    return range_header, range_validation
-
-
-def add_metadata_headers(metadata=None):
-    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
-    headers = {}
-    if metadata:
-        for key, value in metadata.items():
-            headers['x-ms-meta-{}'.format(key)] = value
-    return headers
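
get_length above tries three strategies in turn: len(), os.fstat on a real file descriptor, and seek/tell arithmetic for seekable streams. The last strategy can be demonstrated in isolation (a sketch, not SDK code):

from io import BytesIO, SEEK_END, SEEK_SET

data = b"hello world"
stream = BytesIO(data)
current_position = stream.tell()
stream.seek(0, SEEK_END)                   # jump to the end of the stream
length = stream.tell() - current_position  # bytes remaining from the old position
stream.seek(current_position, SEEK_SET)    # restore the caller's position
assert length == len(data) == 11
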
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,159 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-import logging
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.exceptions import (
-    HttpResponseError,
-    ResourceNotFoundError,
-    ResourceModifiedError,
-    ResourceExistsError,
-    ClientAuthenticationError,
-    DecodeError)
-
-from .parser import _to_utc_datetime
-from .models import StorageErrorCode, UserDelegationKey, get_enum_value
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.exceptions import AzureError
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-class PartialBatchErrorException(HttpResponseError):
-    """There is a partial failure in batch operations.
-
-    :param str message: The message of the exception.
-    :param response: Server response to be deserialized.
-    :param list parts: A list of the parts in multipart response.
-    """
-
-    def __init__(self, message, response, parts):
-        self.parts = parts
-        super(PartialBatchErrorException, self).__init__(message=message, response=response)
-
-
-def parse_length_from_content_range(content_range):
-    '''
-    Parses the blob length from the content range header: bytes 1-3/65537
-    '''
-    if content_range is None:
-        return None
-
-    # First, split in space and take the second half: '1-3/65537'
-    # Next, split on slash and take the second half: '65537'
-    # Finally, convert to an int: 65537
-    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
-
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers)
-
-
-def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return normalize_headers(response_headers), deserialized
-
-
-def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
-    return response.location_mode, deserialized
-
-
-def process_storage_error(storage_error):
-    raise_error = HttpResponseError
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        if error_body:
-            for info in error_body.iter():
-                if info.tag.lower() == 'code':
-                    error_code = info.text
-                elif info.tag.lower() == 'message':
-                    error_message = info.text
-                else:
-                    additional_data[info.tag] = info.text
-    except DecodeError:
-        pass
-
-    try:
-        if error_code:
-            error_code = StorageErrorCode(error_code)
-            if error_code in [StorageErrorCode.condition_not_met,
-                              StorageErrorCode.blob_overwritten]:
-                raise_error = ResourceModifiedError
-            if error_code in [StorageErrorCode.invalid_authentication_info,
-                              StorageErrorCode.authentication_failed]:
-                raise_error = ClientAuthenticationError
-            if error_code in [StorageErrorCode.resource_not_found,
-                              StorageErrorCode.cannot_verify_copy_source,
-                              StorageErrorCode.blob_not_found,
-                              StorageErrorCode.queue_not_found,
-                              StorageErrorCode.container_not_found,
-                              StorageErrorCode.parent_not_found,
-                              StorageErrorCode.share_not_found]:
-                raise_error = ResourceNotFoundError
-            if error_code in [StorageErrorCode.account_already_exists,
-                              StorageErrorCode.account_being_created,
-                              StorageErrorCode.resource_already_exists,
-                              StorageErrorCode.resource_type_mismatch,
-                              StorageErrorCode.blob_already_exists,
-                              StorageErrorCode.queue_already_exists,
-                              StorageErrorCode.container_already_exists,
-                              StorageErrorCode.container_being_deleted,
-                              StorageErrorCode.queue_being_deleted,
-                              StorageErrorCode.share_already_exists,
-                              StorageErrorCode.share_being_deleted]:
-                raise_error = ResourceExistsError
-    except ValueError:
-        # Got an unknown error code
-        pass
-
-    try:
-        error_message += "\nErrorCode:{}".format(error_code.value)
-    except AttributeError:
-        error_message += "\nErrorCode:{}".format(error_code)
-    for name, info in additional_data.items():
-        error_message += "\n{}:{}".format(name, info)
-
-    error = raise_error(message=error_message, response=storage_error.response)
-    error.error_code = error_code
-    error.additional_info = additional_data
-    raise error
-
-
-def parse_to_internal_user_delegation_key(service_user_delegation_key):
-    internal_user_delegation_key = UserDelegationKey()
-    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
-    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
-    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
-    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
-    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
-    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
-    internal_user_delegation_key.value = service_user_delegation_key.value
-    return internal_user_delegation_key
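
parse_length_from_content_range above extracts the total size from a Content-Range header. A quick check of that parsing with an illustrative header value:

content_range = 'bytes 1-3/65537'
# Split on the space and keep '1-3/65537', then split on '/' and keep '65537'.
total_length = int(content_range.split(' ', 1)[1].split('/', 1)[1])
assert total_length == 65537
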
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,209 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from datetime import date
-
-from .parser import _str, _to_utc_datetime
-from .constants import X_MS_VERSION
-from . import sign_string, url_quote
-
-
-class QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-
-    @staticmethod
-    def to_list():
-        return [
-            QueryStringConstants.SIGNED_SIGNATURE,
-            QueryStringConstants.SIGNED_PERMISSION,
-            QueryStringConstants.SIGNED_START,
-            QueryStringConstants.SIGNED_EXPIRY,
-            QueryStringConstants.SIGNED_RESOURCE,
-            QueryStringConstants.SIGNED_IDENTIFIER,
-            QueryStringConstants.SIGNED_IP,
-            QueryStringConstants.SIGNED_PROTOCOL,
-            QueryStringConstants.SIGNED_VERSION,
-            QueryStringConstants.SIGNED_CACHE_CONTROL,
-            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
-            QueryStringConstants.SIGNED_CONTENT_ENCODING,
-            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
-            QueryStringConstants.SIGNED_CONTENT_TYPE,
-            QueryStringConstants.START_PK,
-            QueryStringConstants.START_RK,
-            QueryStringConstants.END_PK,
-            QueryStringConstants.END_RK,
-            QueryStringConstants.SIGNED_RESOURCE_TYPES,
-            QueryStringConstants.SIGNED_SERVICES,
-            QueryStringConstants.SIGNED_OID,
-            QueryStringConstants.SIGNED_TID,
-            QueryStringConstants.SIGNED_KEY_START,
-            QueryStringConstants.SIGNED_KEY_EXPIRY,
-            QueryStringConstants.SIGNED_KEY_SERVICE,
-            QueryStringConstants.SIGNED_KEY_VERSION,
-        ]
-
-
-class SharedAccessSignature(object):
-    '''
-    Provides a factory for creating account shared access
-    signature tokens with an account name and account key. Users can either
-    use the factory or construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key used to generate the shared access signatures.
-        :param str x_ms_version:
-            The service version used to generate the shared access signatures.
-        '''
-        self.account_name = account_name
-        self.account_key = account_key
-        self.x_ms_version = x_ms_version
-
-    def generate_account(self, services, resource_types, permission, expiry, start=None,
-                         ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the account.
-        Use the returned signature with the sas_token parameter of the service
-        or to create a new account object.
-
-        :param ResourceTypes resource_types:
-            Specifies the resource types that are accessible with the account
-            SAS. You can combine values to provide access to more than one
-            resource type.
-        :param AccountSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy. You can combine
-            values to provide more than one permission.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for a request made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _SharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_account(services, resource_types)
-        sas.add_account_signature(self.account_name, self.account_key)
-
-        return sas.get_token()
-
-
-class _SharedAccessHelper(object):
-    def __init__(self):
-        self.query_dict = {}
-
-    def _add_query(self, name, val):
-        if val:
-            # val is truthy here, so it is never None; store its string form.
-            self.query_dict[name] = _str(val)
-
-    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
-        if isinstance(start, date):
-            start = _to_utc_datetime(start)
-
-        if isinstance(expiry, date):
-            expiry = _to_utc_datetime(expiry)
-
-        self._add_query(QueryStringConstants.SIGNED_START, start)
-        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
-        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
-        self._add_query(QueryStringConstants.SIGNED_IP, ip)
-        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
-        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
-
-    def add_resource(self, resource):
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
-
-    def add_id(self, policy_id):
-        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
-
-    def add_account(self, services, resource_types):
-        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
-        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
-
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
-        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
-        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
-
-    def add_account_signature(self, account_name, account_key):
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION))
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
-
-    def get_token(self):
-        return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,548 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-from concurrent import futures
-from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
-from threading import Lock
-from itertools import islice
-from math import ceil
-
-import six
-
-from azure.core.tracing.common import with_current_context
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
-
-
-def _parallel_uploads(executor, uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for one of the running uploads to finish before adding a new one
-        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(executor.submit(with_current_context(uploader), next_chunk))
-
-    # Wait for the remaining uploads to finish
-    done, _running = futures.wait(running)
-    range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
-
-def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        validate_content=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        validate_content=validate_content,
-        **kwargs)
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_chunk), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_substream_block), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b""
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError("Blob data should be of type bytes.")
-                data += temp or b""
-
-                # We have read an empty string, so we are at the end of the
-                # stream, or we have read a full chunk.
-                if temp == b"" or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    def _update_progress(self, length):
-        if self.progress_lock is not None:
-            with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = self._upload_chunk(chunk_offset, chunk_data)
-        self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{:05}'.format(i), SubStream(self.stream, index, length, lock))
-
-    def process_substream_block(self, block_data):
-        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = self._upload_substream_block(block_id, block_stream)
-        self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop("modified_access_conditions", None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return index, block_id
-
-    def _upload_substream_block(self, block_id, block_stream):
-        try:
-            self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # read until non-zero byte is encountered
-        # if reached the end without returning, then chunk_data is all 0's
-        return not any(bytearray(chunk_data))
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-            self.current_length = int(self.response_headers["blob_append_offset"])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options
-            )
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _upload_chunk(self, chunk_offset, chunk_data):
-        length = len(chunk_data)
-        chunk_end = chunk_offset + length - 1
-        response = self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            length,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
-
-
-class SubStream(IOBase):
-
-    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
-        # Python 2.7: file-like objects created with open() typically support seek(), but are not
-        # derived from io.IOBase and thus do not implement seekable().
-        # Python 3: file-like objects created with open() are derived from io.IOBase.
-        try:
-            # Only the main thread runs this, so there is no need to grab the lock.
-            wrapped_stream.seek(0, SEEK_CUR)
-        except Exception:
-            raise ValueError("Wrapped stream must support seek().")
-
-        self._lock = lockObj
-        self._wrapped_stream = wrapped_stream
-        self._position = 0
-        self._stream_begin_index = stream_begin_index
-        self._length = length
-        self._buffer = BytesIO()
-
-        # we must avoid buffering more than necessary, and also not use up too much memory
-        # so the max buffer size is capped at 4MB
-        self._max_buffer_size = (
-            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
-        )
-        self._current_buffer_start = 0
-        self._current_buffer_size = 0
-        super(SubStream, self).__init__()
-
-    def __len__(self):
-        return self._length
-
-    def close(self):
-        if self._buffer:
-            self._buffer.close()
-        self._wrapped_stream = None
-        IOBase.close(self)
-
-    def fileno(self):
-        return self._wrapped_stream.fileno()
-
-    def flush(self):
-        pass
-
-    def read(self, size=None):
-        if self.closed:  # pylint: disable=using-constant-test
-            raise ValueError("Stream is closed.")
-
-        if size is None:
-            size = self._length - self._position
-
-        # adjust if out of bounds
-        if size + self._position >= self._length:
-            size = self._length - self._position
-
-        # return fast
-        if size == 0 or self._buffer.closed:
-            return b""
-
-        # attempt first read from the read buffer and update position
-        read_buffer = self._buffer.read(size)
-        bytes_read = len(read_buffer)
-        bytes_remaining = size - bytes_read
-        self._position += bytes_read
-
-        # repopulate the read buffer from the underlying stream to fulfill the request
-        # ensure the seek and read operations are done atomically (only if a lock is provided)
-        if bytes_remaining > 0:
-            with self._buffer:
-                # either read in the max buffer size specified on the class
-                # or read in just enough data for the current block/sub stream
-                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
-
-                # lock is only defined if max_concurrency > 1 (parallel uploads)
-                if self._lock:
-                    with self._lock:
-                        # reposition the underlying stream to match the start of the data to read
-                        absolute_position = self._stream_begin_index + self._position
-                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
-                        # If we can't seek to the right location, our read will be corrupted so fail fast.
-                        if self._wrapped_stream.tell() != absolute_position:
-                            raise IOError("Stream failed to seek to the desired location.")
-                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-                else:
-                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
-
-            if buffer_from_stream:
-                # update the buffer with new data from the wrapped stream
-                # we need to note down the start position and size of the buffer, in case seek is performed later
-                self._buffer = BytesIO(buffer_from_stream)
-                self._current_buffer_start = self._position
-                self._current_buffer_size = len(buffer_from_stream)
-
-                # read the remaining bytes from the new buffer and update position
-                second_read_buffer = self._buffer.read(bytes_remaining)
-                read_buffer += second_read_buffer
-                self._position += len(second_read_buffer)
-
-        return read_buffer
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        raise UnsupportedOperation
-
-    def seek(self, offset, whence=0):
-        if whence == SEEK_SET:
-            start_index = 0
-        elif whence == SEEK_CUR:
-            start_index = self._position
-        elif whence == SEEK_END:
-            start_index = self._length
-            offset = -offset
-        else:
-            raise ValueError("Invalid argument for the 'whence' parameter.")
-
-        pos = start_index + offset
-
-        if pos > self._length:
-            pos = self._length
-        elif pos < 0:
-            pos = 0
-
-        # check if buffer is still valid
-        # if not, drop buffer
-        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
-            self._buffer.close()
-            self._buffer = BytesIO()
-        else:  # if yes seek to correct position
-            delta = pos - self._current_buffer_start
-            self._buffer.seek(delta, SEEK_SET)
-
-        self._position = pos
-        return pos
-
-    def seekable(self):
-        return True
-
-    def tell(self):
-        return self._position
-
-    def write(self, *args):
-        raise UnsupportedOperation
-
-    def writelines(self, *args):
-        raise UnsupportedOperation
-
-    def writable(self):
-        return False
-
-
-class IterStreamer(object):
-    """
-    File-like streaming iterator.
-    """
-
-    def __init__(self, generator, encoding="UTF-8"):
-        self.generator = generator
-        self.iterator = iter(generator)
-        self.leftover = b""
-        self.encoding = encoding
-
-    def __len__(self):
-        return self.generator.__len__()
-
-    def __iter__(self):
-        return self.iterator
-
-    def seekable(self):
-        return False
-
-    def next(self):
-        return next(self.iterator)
-
-    def tell(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator does not support tell.")
-
-    def seek(self, *args, **kwargs):
-        raise UnsupportedOperation("Data generator is unseekable.")
-
-    def read(self, size):
-        data = self.leftover
-        count = len(self.leftover)
-        try:
-            while count < size:
-                chunk = self.next()
-                if isinstance(chunk, six.text_type):
-                    chunk = chunk.encode(self.encoding)
-                data += chunk
-                count += len(chunk)
-        except StopIteration:
-            pass
-
-        if count > size:
-            self.leftover = data[size:]
-
-        return data[:size]
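
get_substream_blocks above carves an upload into fixed-size blocks, with any remainder carried by the final block. The layout arithmetic in isolation (a sketch under the same rules; the function name is illustrative):

from math import ceil

def block_layout(blob_length, chunk_size):
    # Full-size chunks throughout, except that the last block shrinks to
    # whatever remainder is left when blob_length is not an exact multiple.
    blocks = int(ceil(blob_length / float(chunk_size)))
    last = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
    return [(i * chunk_size, last if i == blocks - 1 else chunk_size)
            for i in range(blocks)]

# A 10 MiB stream with 4 MiB chunks yields three blocks:
assert block_layout(10 * 1024 * 1024, 4 * 1024 * 1024) == [
    (0, 4194304), (4194304, 4194304), (8388608, 2097152)]
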
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,350 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=no-self-use
-
-import asyncio
-from asyncio import Lock
-from itertools import islice
-import threading
-
-from math import ceil
-
-import six
-
-from . import encode_base64, url_quote
-from .request_handlers import get_length
-from .response_handlers import return_response_headers
-from .encryption import get_blob_encryptor_and_padder
-from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
-
-
-_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
-_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
-
-
-async def _parallel_uploads(uploader, pending, running):
-    range_ids = []
-    while True:
-        # Wait for one of the running uploads to finish before adding a new one
-        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
-        range_ids.extend([chunk.result() for chunk in done])
-        try:
-            next_chunk = next(pending)
-        except StopIteration:
-            break
-        else:
-            running.add(asyncio.ensure_future(uploader(next_chunk)))
-
-    # Wait for the remaining uploads to finish
-    if running:
-        done, _running = await asyncio.wait(running)
-        range_ids.extend([chunk.result() for chunk in done])
-    return range_ids
-
-
-async def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        encryption_options=None,
-        **kwargs):
-
-    if encryption_options:
-        encryptor, padder = get_blob_encryptor_and_padder(
-            encryption_options.get('cek'),
-            encryption_options.get('vector'),
-            uploader_class is not PageBlobChunkUploader)
-        kwargs['encryptor'] = encryptor
-        kwargs['padder'] = padder
-
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_chunk(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for chunk in uploader.get_chunk_streams():
-            range_ids.append(await uploader.process_chunk(chunk))
-
-    if any(range_ids):
-        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
-    return uploader.response_headers
-
-
-async def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        **kwargs):
-    parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
-        # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
-    uploader = uploader_class(
-        service=service,
-        total_size=total_size,
-        chunk_size=chunk_size,
-        stream=stream,
-        parallel=parallel,
-        **kwargs)
-
-    if parallel:
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            asyncio.ensure_future(uploader.process_substream_block(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
-    else:
-        range_ids = []
-        for block in uploader.get_substream_blocks():
-            range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
-
-
-class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
-
-    def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
-        self.service = service
-        self.total_size = total_size
-        self.chunk_size = chunk_size
-        self.stream = stream
-        self.parallel = parallel
-
-        # Stream management
-        self.stream_start = stream.tell() if parallel else None
-        self.stream_lock = threading.Lock() if parallel else None
-
-        # Progress feedback
-        self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
-
-        # Encryption
-        self.encryptor = encryptor
-        self.padder = padder
-        self.response_headers = None
-        self.etag = None
-        self.last_modified = None
-        self.request_options = kwargs
-
-    def get_chunk_streams(self):
-        index = 0
-        while True:
-            data = b''
-            read_size = self.chunk_size
-
-            # Buffer until we either reach the end of the stream or get a whole chunk.
-            while True:
-                if self.total_size:
-                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
-                temp = self.stream.read(read_size)
-                if not isinstance(temp, six.binary_type):
-                    raise TypeError('Blob data should be of type bytes.')
-                data += temp or b""
-
-                # We have read an empty string and so are at the end
-                # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
-                    break
-
-            if len(data) == self.chunk_size:
-                if self.padder:
-                    data = self.padder.update(data)
-                if self.encryptor:
-                    data = self.encryptor.update(data)
-                yield index, data
-            else:
-                if self.padder:
-                    data = self.padder.update(data) + self.padder.finalize()
-                if self.encryptor:
-                    data = self.encryptor.update(data) + self.encryptor.finalize()
-                if data:
-                    yield index, data
-                break
-            index += len(data)
-
-    async def process_chunk(self, chunk_data):
-        chunk_bytes = chunk_data[1]
-        chunk_offset = chunk_data[0]
-        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
-
-    async def _update_progress(self, length):
-        if self.progress_lock is not None:
-            async with self.progress_lock:
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
-        range_id = await self._upload_chunk(chunk_offset, chunk_data)
-        await self._update_progress(len(chunk_data))
-        return range_id
-
-    def get_substream_blocks(self):
-        assert self.chunk_size is not None
-        lock = self.stream_lock
-        blob_length = self.total_size
-
-        if blob_length is None:
-            blob_length = get_length(self.stream)
-            if blob_length is None:
-                raise ValueError("Unable to determine content length of upload data.")
-
-        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
-        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
-
-        for i in range(blocks):
-            index = i * self.chunk_size
-            length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
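Editor's note: the block arithmetic above is easier to see with concrete numbers; a standalone sketch of the same split:

    from math import ceil

    def split_blocks(blob_length, chunk_size):
        # Mirrors get_substream_blocks(): ceil-divide into fixed-size blocks,
        # with the remainder (if any) becoming the final, shorter block.
        blocks = int(ceil(blob_length / (chunk_size * 1.0)))
        last = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
        return [(i * chunk_size, last if i == blocks - 1 else chunk_size)
                for i in range(blocks)]

    # split_blocks(10, 4) -> [(0, 4), (4, 4), (8, 2)]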
-
-    async def process_substream_block(self, block_data):
-        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        raise NotImplementedError("Must be implemented by child class.")
-
-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
-        await self._update_progress(len(block_stream))
-        return range_id
-
-    def set_response_properties(self, resp):
-        self.etag = resp.etag
-        self.last_modified = resp.last_modified
-
-
-class BlockBlobChunkUploader(_ChunkUploader):
-
-    def __init__(self, *args, **kwargs):
-        kwargs.pop('modified_access_conditions', None)
-        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # TODO: This is incorrect, but works with recording.
-        index = '{0:032d}'.format(chunk_offset)
-        block_id = encode_base64(url_quote(encode_base64(index)))
-        await self.service.stage_block(
-            block_id,
-            len(chunk_data),
-            chunk_data,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options)
-        return index, block_id
-
-    async def _upload_substream_block(self, block_id, block_stream):
-        try:
-            await self.service.stage_block(
-                block_id,
-                len(block_stream),
-                block_stream,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-        finally:
-            block_stream.close()
-        return block_id
-
-
-class PageBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def _is_chunk_empty(self, chunk_data):
-        # Scan until a non-zero byte is encountered; if the end is reached
-        # without returning, chunk_data is all zeros.
-        for each_byte in chunk_data:
-            if each_byte not in [0, b'\x00']:
-                return False
-        return True
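Editor's note: the dual membership test above exists because iterating a bytes object yields ints on Python 3 but one-byte strings on Python 2:

    list(b'\x00a')   # Python 3: [0, 97]
                     # Python 2: ['\x00', 'a']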
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        # avoid uploading the empty pages
-        if not self._is_chunk_empty(chunk_data):
-            chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-            computed_md5 = None
-            self.response_headers = await self.service.upload_pages(
-                chunk_data,
-                content_length=len(chunk_data),
-                transactional_content_md5=computed_md5,
-                range=content_range,
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-            if not self.parallel and self.request_options.get('modified_access_conditions'):
-                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
-
-
-class AppendBlobChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    def __init__(self, *args, **kwargs):
-        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
-        self.current_length = None
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        if self.current_length is None:
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-            self.current_length = int(self.response_headers['blob_append_offset'])
-        else:
-            self.request_options['append_position_access_conditions'].append_position = \
-                self.current_length + chunk_offset
-            self.response_headers = await self.service.append_block(
-                chunk_data,
-                content_length=len(chunk_data),
-                cls=return_response_headers,
-                data_stream_total=self.total_size,
-                upload_stream_current=self.progress_total,
-                **self.request_options)
-
-
-class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
-
-    async def _upload_chunk(self, chunk_offset, chunk_data):
-        chunk_end = chunk_offset + len(chunk_data) - 1
-        response = await self.service.upload_range(
-            chunk_data,
-            chunk_offset,
-            chunk_end,
-            data_stream_total=self.total_size,
-            upload_stream_current=self.progress_total,
-            **self.request_options
-        )
-        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
-        return range_id, response
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,261 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, TYPE_CHECKING
-)
-
-from azure.storage.queue._shared import sign_string
-from azure.storage.queue._shared.constants import X_MS_VERSION
-from azure.storage.queue._shared.models import Services
-from azure.storage.queue._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \
-    QueryStringConstants
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.storage.queue import (
-        ResourceTypes,
-        AccountSasPermissions,
-        QueueSasPermissions
-    )
-
-class QueueSharedAccessSignature(SharedAccessSignature):
-    '''
-    Provides a factory for creating queue shared access
-    signature tokens with a common account name and account key. Users can either
-    use the factory or can construct the appropriate service and use the
-    generate_*_shared_access_signature method directly.
-    '''
-
-    def __init__(self, account_name, account_key):
-        '''
-        :param str account_name:
-            The storage account name used to generate the shared access signatures.
-        :param str account_key:
-            The access key to generate the shared access signatures.
-        '''
-        super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
-
-    def generate_queue(self, queue_name, permission=None,
-                       expiry=None, start=None, policy_id=None,
-                       ip=None, protocol=None):
-        '''
-        Generates a shared access signature for the queue.
-        Use the returned signature with the sas_token parameter of QueueService.
-        :param str queue_name:
-            Name of queue.
-        :param QueueSasPermissions permission:
-            The permissions associated with the shared access signature. The
-            user is restricted to operations allowed by the permissions.
-            Permissions must be ordered read, add, update, process.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has been
-            specified in an associated stored access policy.
-        :param expiry:
-            The time at which the shared access signature becomes invalid.
-            Required unless an id is given referencing a stored access policy
-            which contains this field. This field must be omitted if it has
-            been specified in an associated stored access policy. Azure will always
-            convert values to UTC. If a date is passed in without timezone info, it
-            is assumed to be UTC.
-        :type expiry: datetime or str
-        :param start:
-            The time at which the shared access signature becomes valid. If
-            omitted, start time for this call is assumed to be the time when the
-            storage service receives the request. Azure will always convert values
-            to UTC. If a date is passed in without timezone info, it is assumed to
-            be UTC.
-        :type start: datetime or str
-        :param str policy_id:
-            A unique value up to 64 characters in length that correlates to a
-            stored access policy.
-        :param str ip:
-            Specifies an IP address or a range of IP addresses from which to accept requests.
-            If the IP address from which the request originates does not match the IP address
-            or address range specified on the SAS token, the request is not authenticated.
-            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-            restricts the request to those IP addresses.
-        :param str protocol:
-            Specifies the protocol permitted for requests made with the SAS. The default value
-            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
-        '''
-        sas = _QueueSharedAccessHelper()
-        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
-        sas.add_id(policy_id)
-        sas.add_resource_signature(self.account_name, self.account_key, queue_name)
-
-        return sas.get_token()
-
-
-class _QueueSharedAccessHelper(_SharedAccessHelper):
-
-    def add_resource_signature(self, account_name, account_key, path):  # pylint: disable=arguments-differ
-        def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        if path[0] != '/':
-            path = '/' + path
-
-        canonicalized_resource = '/queue/' + account_name + path + '\n'
-
-        # Form the string to sign from shared_access_policy and canonicalized
-        # resource. The order of values is important.
-        string_to_sign = \
-            (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             canonicalized_resource +
-             get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION))
-
-        # remove the trailing newline
-        if string_to_sign[-1] == '\n':
-            string_to_sign = string_to_sign[:-1]
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
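Editor's note: sign_string lives in the shared helpers and is not shown in this hunk. Below is a hedged sketch of the conventional Storage signing scheme it is expected to implement (base64-decode the account key, HMAC-SHA256 the UTF-8 string-to-sign, base64-encode the digest); treat this as an assumption, not the SDK source:

    import base64
    import hashlib
    import hmac

    def sign_string_sketch(account_key, string_to_sign):
        # Assumed scheme: the account key is base64, and the signature is the
        # base64-encoded HMAC-SHA256 digest of the UTF-8 string-to-sign.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')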
-
-
-def generate_account_sas(
-        account_name,  # type: str
-        account_key,  # type: str
-        resource_types,  # type: Union[ResourceTypes, str]
-        permission,  # type: Union[AccountSasPermissions, str]
-        expiry,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        ip=None,  # type: Optional[str]
-        **kwargs  # type: Any
-    ):  # type: (...) -> str
-    """Generates a shared access signature for the queue service.
-
-    Use the returned signature with the credential parameter of any Queue Service.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param ~azure.storage.queue.ResourceTypes resource_types:
-        Specifies the resource types that are accessible with the account SAS.
-    :param ~azure.storage.queue.AccountSasPermissions permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless an id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for requests made with the SAS. The default value is https.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-    """
-    sas = SharedAccessSignature(account_name, account_key)
-    return sas.generate_account(
-        services=Services(queue=True),
-        resource_types=resource_types,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        ip=ip,
-        **kwargs
-    ) # type: ignore
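Editor's note: a hedged usage sketch for generate_account_sas; the account name and key are placeholders, not real credentials:

    from datetime import datetime, timedelta
    from azure.storage.queue import AccountSasPermissions, ResourceTypes

    sas_token = generate_account_sas(
        account_name="mystorageaccount",        # placeholder
        account_key="<base64 account key>",     # placeholder
        resource_types=ResourceTypes(service=True, object=True),
        permission=AccountSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )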
-
-
-def generate_queue_sas(
-        account_name,  # type: str
-        queue_name,  # type: str
-        account_key,  # type: str
-        permission=None,  # type: Optional[Union[QueueSasPermissions, str]]
-        expiry=None,  # type: Optional[Union[datetime, str]]
-        start=None,  # type: Optional[Union[datetime, str]]
-        policy_id=None,  # type: Optional[str]
-        ip=None,  # type: Optional[str]
-        **kwargs  # type: Any
-    ):  # type: (...) -> str
-    """Generates a shared access signature for a queue.
-
-    Use the returned signature with the credential parameter of any Queue Service.
-
-    :param str account_name:
-        The storage account name used to generate the shared access signature.
-    :param str queue_name:
-        The name of the queue.
-    :param str account_key:
-        The account key, also called shared key or access key, to generate the shared access signature.
-    :param ~azure.storage.queue.QueueSasPermissions permission:
-        The permissions associated with the shared access signature. The
-        user is restricted to operations allowed by the permissions.
-        Required unless a policy_id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has been
-        specified in an associated stored access policy.
-    :param expiry:
-        The time at which the shared access signature becomes invalid.
-        Required unless a policy_id is given referencing a stored access policy
-        which contains this field. This field must be omitted if it has
-        been specified in an associated stored access policy. Azure will always
-        convert values to UTC. If a date is passed in without timezone info, it
-        is assumed to be UTC.
-    :type expiry: ~datetime.datetime or str
-    :param start:
-        The time at which the shared access signature becomes valid. If
-        omitted, start time for this call is assumed to be the time when the
-        storage service receives the request. Azure will always convert values
-        to UTC. If a date is passed in without timezone info, it is assumed to
-        be UTC.
-    :type start: ~datetime.datetime or str
-    :param str policy_id:
-        A unique value up to 64 characters in length that correlates to a
-        stored access policy. To create a stored access policy, use
-        :func:`~azure.storage.queue.QueueClient.set_queue_access_policy`.
-    :param str ip:
-        Specifies an IP address or a range of IP addresses from which to accept requests.
-        If the IP address from which the request originates does not match the IP address
-        or address range specified on the SAS token, the request is not authenticated.
-        For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
-        restricts the request to those IP addresses.
-    :keyword str protocol:
-        Specifies the protocol permitted for requests made with the SAS. The default value is https.
-    :return: A Shared Access Signature (sas) token.
-    :rtype: str
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/queue_samples_message.py
-            :start-after: [START queue_client_sas_token]
-            :end-before: [END queue_client_sas_token]
-            :language: python
-            :dedent: 12
-            :caption: Generate a sas token.
-    """
-    sas = QueueSharedAccessSignature(account_name, account_key)
-    return sas.generate_queue(
-        queue_name,
-        permission=permission,
-        expiry=expiry,
-        start=start,
-        policy_id=policy_id,
-        ip=ip,
-        **kwargs
-    )
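Editor's note: and the queue-scoped equivalent, again with placeholder credentials:

    from datetime import datetime, timedelta
    from azure.storage.queue import QueueSasPermissions

    token = generate_queue_sas(
        account_name="mystorageaccount",        # placeholder
        queue_name="myqueue",                   # placeholder
        account_key="<base64 account key>",     # placeholder
        permission=QueueSasPermissions(read=True, process=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )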
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_version.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_version.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_version.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/_version.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,12 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-VERSION = "12.1.1"
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from ._queue_client_async import QueueClient
-from ._queue_service_client_async import QueueServiceClient
-
-
-__all__ = [
-    'QueueClient',
-    'QueueServiceClient',
-]
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,96 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called
-
-from typing import List # pylint: disable=unused-import
-from azure.core.async_paging import AsyncPageIterator
-from .._shared.response_handlers import (
-    process_storage_error,
-    return_context_and_deserialized)
-from .._generated.models import StorageErrorException
-from .._models import QueueMessage, QueueProperties
-
-
-class MessagesPaged(AsyncPageIterator):
-    """An iterable of Queue Messages.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param int results_per_page: The maximum number of messages to retrieve per
-        call.
-    """
-    def __init__(self, command, results_per_page=None, continuation_token=None):
-        if continuation_token is not None:
-            raise ValueError("This operation does not support continuation token")
-
-        super(MessagesPaged, self).__init__(
-            self._get_next_cb,
-            self._extract_data_cb,
-        )
-        self._command = command
-        self.results_per_page = results_per_page
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(number_of_messages=self.results_per_page)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, messages):
-        # There is no concept of a continuation token, so raise our own end-of-paging condition
-        if not messages:
-            raise StopAsyncIteration("End of paging")
-        return "TOKEN_IGNORED", [QueueMessage._from_generated(q) for q in messages]  # pylint: disable=protected-access
-
-
-class QueuePropertiesPaged(AsyncPageIterator):
-    """An iterable of Queue properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A queue name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str next_marker: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only queues whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of queue names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
-        super(QueuePropertiesPaged, self).__init__(
-            self._get_next_cb,
-            self._extract_data_cb,
-            continuation_token=continuation_token or ""
-        )
-        self._command = command
-        self.service_endpoint = None
-        self.prefix = prefix
-        self.marker = None
-        self.results_per_page = results_per_page
-        self.location_mode = None
-
-    async def _get_next_cb(self, continuation_token):
-        try:
-            return await self._command(
-                marker=continuation_token or None,
-                maxresults=self.results_per_page,
-                cls=return_context_and_deserialized,
-                use_location=self.location_mode)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    async def _extract_data_cb(self, get_next_return):
-        self.location_mode, self._response = get_next_return
-        self.service_endpoint = self._response.service_endpoint
-        self.prefix = self._response.prefix
-        self.marker = self._response.marker
-        self.results_per_page = self._response.max_results
-        props_list = [QueueProperties._from_generated(q) for q in self._response.queue_items] # pylint: disable=protected-access
-        return self._response.next_marker or None, props_list
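Editor's note: both pagers above follow the two-callback AsyncPageIterator contract: _get_next_cb(continuation_token) fetches one raw page, and _extract_data_cb returns (next_token_or_None, items). A minimal, service-free sketch of that contract:

    from azure.core.async_paging import AsyncPageIterator

    class NumbersPaged(AsyncPageIterator):
        def __init__(self, continuation_token=None):
            super(NumbersPaged, self).__init__(
                self._get_next_cb, self._extract_data_cb,
                continuation_token=continuation_token or "0")

        async def _get_next_cb(self, continuation_token):
            start = int(continuation_token)
            return start, list(range(start, start + 3))

        async def _extract_data_cb(self, page):
            start, items = page
            # Returning None as the token ends the iteration.
            next_token = str(start + 3) if start + 3 < 9 else None
            return next_token, items

    # Inside a coroutine:
    #     async for page in NumbersPaged():
    #         async for n in page:
    #             print(n)          # 0..8 across three pages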
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,694 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union,
-    Optional,
-    Any,
-    IO,
-    Iterable,
-    AnyStr,
-    Dict,
-    List,
-    Tuple,
-    TYPE_CHECKING,
-)
-
-try:
-    from urllib.parse import urlparse, quote, unquote  # pylint: disable=unused-import
-except ImportError:
-    from urlparse import urlparse  # type: ignore
-    from urllib2 import quote, unquote  # type: ignore
-
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from azure.core.async_paging import AsyncItemPaged
-
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin
-from .._shared.request_handlers import add_metadata_headers, serialize_iso
-from .._shared.response_handlers import (
-    return_response_headers,
-    process_storage_error,
-    return_headers_and_deserialized,
-)
-from .._deserialize import deserialize_queue_properties, deserialize_queue_creation
-from .._generated.version import VERSION
-from .._generated.aio import AzureQueueStorage
-from .._generated.models import StorageErrorException, SignedIdentifier
-from .._generated.models import QueueMessage as GenQueueMessage
-
-from .._models import QueueMessage, AccessPolicy
-from ._models import MessagesPaged
-from .._shared.policies_async import ExponentialRetry
-from .._queue_client import QueueClient as QueueClientBase
-
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.pipeline.policies import HTTPPolicy
-    from .._models import QueueSasPermissions, QueueProperties
-
-
-class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase):
-    """A client to interact with a specific Queue.
-
-    :param str account_url:
-        The URL to the storage account. In order to create a client given the full URI to the queue,
-        use the :func:`from_queue_url` classmethod.
-    :param queue_name: The name of the queue.
-    :type queue_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword encode_policy: The encoding policy to use on outgoing messages.
-        Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`,
-        :class:`BinaryBase64EncodePolicy` or `None`.
-    :keyword decode_policy: The decoding policy to use on incoming messages.
-        Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`,
-        :class:`BinaryBase64DecodePolicy` or `None`.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/queue_samples_message_async.py
-            :start-after: [START async_create_queue_client]
-            :end-before: [END async_create_queue_client]
-            :language: python
-            :dedent: 16
-            :caption: Create the queue client with url and credential.
-
-        .. literalinclude:: ../samples/queue_samples_message_async.py
-            :start-after: [START async_create_queue_client_from_connection_string]
-            :end-before: [END async_create_queue_client_from_connection_string]
-            :language: python
-            :dedent: 8
-            :caption: Create the queue client with a connection string.
-    """
-
-    def __init__(
-        self,
-        account_url,  # type: str
-        queue_name,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(QueueClient, self).__init__(
-            account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs
-        )
-        self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop)  # type: ignore
-        self._client._config.version = kwargs.get('api_version', VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
-    @distributed_trace_async
-    async def create_queue(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Creates a new queue in the storage account.
-
-        If a queue with the same name already exists, the operation fails with
-        a `ResourceExistsError`.
-
-        :keyword dict(str,str) metadata:
-            A dict containing name-value pairs to associate with the queue as
-            metadata. Note that metadata names preserve the case with which they
-            were created, but are case-insensitive when set or read.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return: None or the result of cls(response)
-        :rtype: None
-        :raises: StorageErrorException
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_hello_world_async.py
-                :start-after: [START async_create_queue]
-                :end-before: [END async_create_queue]
-                :language: python
-                :dedent: 12
-                :caption: Create a queue.
-        """
-        metadata = kwargs.pop('metadata', None)
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop("headers", {})
-        headers.update(add_metadata_headers(metadata))  # type: ignore
-        try:
-            return await self._client.queue.create(  # type: ignore
-                metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def delete_queue(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion
-        and is no longer accessible to clients. The queue is later removed from
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete.
-        If an operation is attempted against the queue while it is being deleted,
-        an :class:`HttpResponseError` will be thrown.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_hello_world_async.py
-                :start-after: [START async_delete_queue]
-                :end-before: [END async_delete_queue]
-                :language: python
-                :dedent: 16
-                :caption: Delete a queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.queue.delete(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_queue_properties(self, **kwargs):
-        # type: (Optional[Any]) -> QueueProperties
-        """Returns all user-defined metadata for the specified queue.
-
-        The data returned does not include the queue's list of messages.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: User-defined metadata for the queue.
-        :rtype: ~azure.storage.queue.QueueProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_get_queue_properties]
-                :end-before: [END async_get_queue_properties]
-                :language: python
-                :dedent: 16
-                :caption: Get the properties on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            response = await self._client.queue.get_properties(
-                timeout=timeout, cls=deserialize_queue_properties, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-        response.name = self.queue_name
-        return response  # type: ignore
-
-    @distributed_trace_async
-    async def set_queue_metadata(self, metadata=None, **kwargs):
-        # type: (Optional[Dict[str, Any]], Optional[Any]) -> None
-        """Sets user-defined metadata on the specified queue.
-
-        Metadata is associated with the queue as name-value pairs.
-
-        :param metadata:
-            A dict containing name-value pairs to associate with the
-            queue as metadata.
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_set_queue_metadata]
-                :end-before: [END async_set_queue_metadata]
-                :language: python
-                :dedent: 16
-                :caption: Set metadata on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        headers = kwargs.pop("headers", {})
-        headers.update(add_metadata_headers(metadata))  # type: ignore
-        try:
-            return await self._client.queue.set_metadata(  # type: ignore
-                timeout=timeout, headers=headers, cls=return_response_headers, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def get_queue_access_policy(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Returns details about any stored access policies specified on the
-        queue that may be used with Shared Access Signatures.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return: A dictionary of access policies associated with the queue.
-        :rtype: dict(str, ~azure.storage.queue.AccessPolicy)
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            _, identifiers = await self._client.queue.get_access_policy(
-                timeout=timeout, cls=return_headers_and_deserialized, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return {s.id: s.access_policy or AccessPolicy() for s in identifiers}
-
-    @distributed_trace_async
-    async def set_queue_access_policy(self, signed_identifiers, **kwargs):
-        # type: (Dict[str, AccessPolicy], Optional[Any]) -> None
-        """Sets stored access policies for the queue that may be used with Shared
-        Access Signatures.
-
-        When you set permissions for a queue, the existing permissions are replaced.
-        To update the queue's permissions, call :func:`~get_queue_access_policy` to fetch
-        all access policies associated with the queue, modify the access policy
-        that you wish to change, and then call this function with the complete
-        set of data to perform the update.
-
-        When you establish a stored access policy on a queue, it may take up to
-        30 seconds to take effect. During this interval, a shared access signature
-        that is associated with the stored access policy will throw an
-        :class:`HttpResponseError` until the access policy becomes active.
-
-        :param signed_identifiers:
-            SignedIdentifier access policies to associate with the queue.
-            This may contain up to 5 elements. An empty dict
-            will clear the access policies set on the queue.
-        :type signed_identifiers: dict(str, ~azure.storage.queue.AccessPolicy)
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_set_access_policy]
-                :end-before: [END async_set_access_policy]
-                :language: python
-                :dedent: 16
-                :caption: Set an access policy on the queue.
-        """
-        timeout = kwargs.pop('timeout', None)
-        if len(signed_identifiers) > 15:
-            raise ValueError(
-                "Too many access policies provided. The server does not support setting "
-                "more than 15 access policies on a single resource."
-            )
-        identifiers = []
-        for key, value in signed_identifiers.items():
-            if value:
-                value.start = serialize_iso(value.start)
-                value.expiry = serialize_iso(value.expiry)
-            identifiers.append(SignedIdentifier(id=key, access_policy=value))
-        signed_identifiers = identifiers  # type: ignore
-        try:
-            await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
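Editor's note: a hedged sketch of building the signed_identifiers mapping for this method; assumes `queue` is an aio QueueClient from this module:

    from datetime import datetime, timedelta
    from azure.storage.queue import AccessPolicy, QueueSasPermissions

    read_only = AccessPolicy(
        permission=QueueSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # Inside a coroutine:
    #     await queue.set_queue_access_policy({"read-only": read_only})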
-
-    @distributed_trace_async
-    async def send_message(  # type: ignore
-        self,
-        content,  # type: Any
-        **kwargs  # type: Optional[Any]
-    ):
-        # type: (...) -> QueueMessage
-        """Adds a new message to the back of the message queue.
-
-        The visibility timeout specifies the time that the message will be
-        invisible. After the timeout expires, the message will become visible.
-        If a visibility timeout is not specified, the default value of 0 is used.
-
-        The message time-to-live specifies how long a message will remain in the
-        queue. The message will be deleted from the queue when the time-to-live
-        period expires.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function
-            set on the service. Default is str. The encoded message can be up to
-            64KB in size.
-        :keyword int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :keyword int time_to_live:
-            Specifies the time-to-live interval for the message, in
-            seconds. The time-to-live may be any positive number or -1 for infinity. If this
-            parameter is omitted, the default time-to-live is 7 days.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.QueueMessage` object.
-            This object is also populated with the content although it is not
-            returned from the service.
-        :rtype: ~azure.storage.queue.QueueMessage
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_send_messages]
-                :end-before: [END async_send_messages]
-                :language: python
-                :dedent: 16
-                :caption: Send messages.
-        """
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        time_to_live = kwargs.pop('time_to_live', None)
-        timeout = kwargs.pop('timeout', None)
-        self._config.message_encode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function
-        )
-        content = self._config.message_encode_policy(content)
-        new_message = GenQueueMessage(message_text=content)
-
-        try:
-            enqueued = await self._client.messages.enqueue(
-                queue_message=new_message,
-                visibilitytimeout=visibility_timeout,
-                message_time_to_live=time_to_live,
-                timeout=timeout,
-                **kwargs
-            )
-            queue_message = QueueMessage(content=new_message.message_text)
-            queue_message.id = enqueued[0].message_id
-            queue_message.inserted_on = enqueued[0].insertion_time
-            queue_message.expires_on = enqueued[0].expiration_time
-            queue_message.pop_receipt = enqueued[0].pop_receipt
-            queue_message.next_visible_on = enqueued[0].time_next_visible
-            return queue_message
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace
-    def receive_messages(self, **kwargs):
-        # type: (Optional[Any]) -> AsyncItemPaged[QueueMessage]
-        """Removes one or more messages from the front of the queue.
-
-        When a message is retrieved from the queue, the response includes the message
-        content and a pop_receipt value, which is required to delete the message.
-        The message is not automatically deleted from the queue, but after it has
-        been retrieved, it is not visible to other clients for the time interval
-        specified by the visibility_timeout parameter.
-
-        If the key-encryption-key or resolver field is set on the local service object, the messages will be
-        decrypted before being returned.
-
-        :keyword int messages_per_page:
-            A nonzero integer value that specifies the number of
-            messages to retrieve from the queue, up to a maximum of 32. If
-            fewer are visible, the visible messages are returned. By default,
-            a single message is retrieved from the queue with this operation.
-        :keyword int visibility_timeout:
-            If not specified, the default value is 0. Specifies the
-            new visibility timeout value, in seconds, relative to server time.
-            The value must be larger than or equal to 0, and cannot be
-            larger than 7 days. The visibility timeout of a message cannot be
-            set to a value later than the expiry time. visibility_timeout
-            should be set to a value smaller than the time-to-live value.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            An async iterator of dict-like Message objects.
-        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.queue.QueueMessage]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_receive_messages]
-                :end-before: [END async_receive_messages]
-                :language: python
-                :dedent: 16
-                :caption: Receive messages from the queue.
-        """
-        messages_per_page = kwargs.pop('messages_per_page', None)
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        timeout = kwargs.pop('timeout', None)
-        self._config.message_decode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function
-        )
-        try:
-            command = functools.partial(
-                self._client.messages.dequeue,
-                visibilitytimeout=visibility_timeout,
-                timeout=timeout,
-                cls=self._config.message_decode_policy,
-                **kwargs
-            )
-            return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged)
-        except StorageErrorException as error:
-            process_storage_error(error)
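Editor's note: a hedged end-to-end sketch of the send/receive/delete loop with this async client. The connection string and queue name are placeholders, and the import path shown is the upstream azure-storage-queue one; in this vendored package the module path differs:

    import asyncio
    from azure.storage.queue.aio import QueueClient

    async def main():
        queue = QueueClient.from_connection_string("<connection string>", "myqueue")
        await queue.send_message("hello")
        async for msg in queue.receive_messages(messages_per_page=5):
            print(msg.content)
            await queue.delete_message(msg)   # uses the pop_receipt carried by msg

    # asyncio.get_event_loop().run_until_complete(main())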
-
-    @distributed_trace_async
-    async def update_message(
-        self,
-        message,
-        pop_receipt=None,
-        content=None,
-        **kwargs
-    ):
-        # type: (Any, Optional[str], Optional[Any], Any) -> QueueMessage
-        """Updates the visibility timeout of a message. You can also use this
-        operation to update the contents of a message.
-
-        This operation can be used to continually extend the invisibility of a
-        queue message. This functionality can be useful if you want a worker role
-        to "lease" a queue message. For example, if a worker role calls :func:`~receive_messages()`
-        and recognizes that it needs more time to process a message, it can
-        continually extend the message's invisibility until it is processed. If
-        the worker role were to fail during processing, eventually the message
-        would become visible again and another worker role could process it.
-
-        If the key-encryption-key field is set on the local service object, this method will
-        encrypt the content before uploading.
-
-        :param message:
-            The message object or id identifying the message to update.
-        :type message: str or ~azure.storage.queue.QueueMessage
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to the :func:`~receive_messages` or :func:`~update_message` operation.
-        :param obj content:
-            Message content. Allowed type is determined by the encode_function
-            set on the service. Default is str.
-        :keyword int visibility_timeout:
-            Specifies the new visibility timeout value, in seconds,
-            relative to server time. The new value must be larger than or equal
-            to 0, and cannot be larger than 7 days. The visibility timeout of a
-            message cannot be set to a value later than the expiry time. A
-            message can be updated until it has been deleted or has expired.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A :class:`~azure.storage.queue.QueueMessage` object. For convenience,
-            this object is also populated with the content, although it is not returned by the service.
-        :rtype: ~azure.storage.queue.QueueMessage
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_update_message]
-                :end-before: [END async_update_message]
-                :language: python
-                :dedent: 16
-                :caption: Update a message.
-        """
-        visibility_timeout = kwargs.pop('visibility_timeout', None)
-        timeout = kwargs.pop('timeout', None)
-        try:
-            message_id = message.id
-            message_text = content or message.content
-            receipt = pop_receipt or message.pop_receipt
-            inserted_on = message.inserted_on
-            expires_on = message.expires_on
-            dequeue_count = message.dequeue_count
-        except AttributeError:
-            message_id = message
-            message_text = content
-            receipt = pop_receipt
-            inserted_on = None
-            expires_on = None
-            dequeue_count = None
-
-        if receipt is None:
-            raise ValueError("pop_receipt must be present")
-        if message_text is not None:
-            self._config.message_encode_policy.configure(
-                self.require_encryption, self.key_encryption_key, self.key_resolver_function
-            )
-            message_text = self._config.message_encode_policy(message_text)
-            updated = GenQueueMessage(message_text=message_text)
-        else:
-            updated = None  # type: ignore
-        try:
-            response = await self._client.message_id.update(
-                queue_message=updated,
-                visibilitytimeout=visibility_timeout or 0,
-                timeout=timeout,
-                pop_receipt=receipt,
-                cls=return_response_headers,
-                queue_message_id=message_id,
-                **kwargs
-            )
-            new_message = QueueMessage(content=message_text)
-            new_message.id = message_id
-            new_message.inserted_on = inserted_on
-            new_message.expires_on = expires_on
-            new_message.dequeue_count = dequeue_count
-            new_message.pop_receipt = response["popreceipt"]
-            new_message.next_visible_on = response["time_next_visible"]
-            return new_message
-        except StorageErrorException as error:
-            process_storage_error(error)
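Editor's note: the "lease extension" workflow described in the docstring, sketched; assumes `queue` is an aio QueueClient and `msg` came from receive_messages:

    # Inside a coroutine:
    #     msg = await queue.update_message(msg, visibility_timeout=60)  # hide 60 more seconds
    #     ...continue processing; the returned message carries the fresh pop_receipt...
    #     await queue.delete_message(msg)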
-
-    @distributed_trace_async
-    async def peek_messages(self, max_messages=None, **kwargs):
-        # type: (Optional[int], Optional[Any]) -> List[QueueMessage]
-        """Retrieves one or more messages from the front of the queue, but does
-        not alter the visibility of the message.
-
-        Only messages that are visible may be retrieved. When a message is retrieved
-        for the first time with a call to :func:`~receive_messages`, its dequeue_count property
-        is set to 1. If it is not deleted and is subsequently retrieved again, the
-        dequeue_count property is incremented. The client may use this value to
-        determine how many times a message has been retrieved. Note that a call
-        to peek_messages does not increment the value of dequeue_count, but returns
-        this value for the client to read.
-
-        If the key-encryption-key or resolver field is set on the local service object,
-        the messages will be decrypted before being returned.
-
-        :param int max_messages:
-            A nonzero integer value that specifies the number of
-            messages to peek from the queue, up to a maximum of 32. By default,
-            a single message is peeked from the queue with this operation.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-        :return:
-            A list of :class:`~azure.storage.queue.QueueMessage` objects. Note that
-            next_visible_on and pop_receipt will not be populated as peek does
-            not pop the message and can only retrieve already visible messages.
-        :rtype: list(:class:`~azure.storage.queue.QueueMessage`)
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_peek_message]
-                :end-before: [END async_peek_message]
-                :language: python
-                :dedent: 16
-                :caption: Peek messages.
-        """
-        timeout = kwargs.pop('timeout', None)
-        if max_messages and not 1 <= max_messages <= 32:
-            raise ValueError("Number of messages to peek should be between 1 and 32")
-        self._config.message_decode_policy.configure(
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            resolver=self.key_resolver_function
-        )
-        try:
-            messages = await self._client.messages.peek(
-                number_of_messages=max_messages, timeout=timeout, cls=self._config.message_decode_policy, **kwargs
-            )
-            wrapped_messages = []
-            for peeked in messages:
-                wrapped_messages.append(QueueMessage._from_generated(peeked))  # pylint: disable=protected-access
-            return wrapped_messages
-        except StorageErrorException as error:
-            process_storage_error(error)
-
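Because peek_messages neither dequeues anything nor increments
dequeue_count, it suits read-only monitoring. A sketch under the same
hypothetical connection assumptions as above:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import QueueClient

    async def main():
        queue = QueueClient("https://myaccount.queue.core.windows.net",
                            queue_name="tasks", credential="<sas-token>")
        # Peek up to 5 visible messages; the valid range is 1-32.
        for msg in await queue.peek_messages(max_messages=5):
            # next_visible_on and pop_receipt stay unset: nothing was popped.
            print(msg.id, msg.dequeue_count, msg.content)

    asyncio.run(main())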
-    @distributed_trace_async
-    async def clear_messages(self, **kwargs):
-        # type: (Optional[Any]) -> None
-        """Deletes all messages from the specified queue.
-
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_clear_messages]
-                :end-before: [END async_clear_messages]
-                :language: python
-                :dedent: 16
-                :caption: Clears all messages.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            await self._client.messages.clear(timeout=timeout, **kwargs)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def delete_message(self, message, pop_receipt=None, **kwargs):
-        # type: (Any, Optional[str], Any) -> None
-        """Deletes the specified message.
-
-        Normally, after a client retrieves a message with :func:`~receive_messages`,
-        the client is expected to process and delete the message. To delete the
-        message, you must have the message object itself, or two items of data: id and pop_receipt.
-        The id is returned from the previous receive_messages operation. The
-        pop_receipt is returned from the most recent :func:`~receive_messages` or
-        :func:`~update_message` operation. In order for the delete_message operation
-        to succeed, the pop_receipt specified on the request must match the
-        pop_receipt returned from the :func:`~receive_messages` or :func:`~update_message`
-        operation.
-
-        :param message:
-            The message object or id identifying the message to delete.
-        :type message: str or ~azure.storage.queue.QueueMessage
-        :param str pop_receipt:
-            A valid pop receipt value returned from an earlier call
-            to :func:`~receive_messages` or :func:`~update_message`.
-        :keyword int timeout:
-            The server timeout, expressed in seconds.
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_message_async.py
-                :start-after: [START async_delete_message]
-                :end-before: [END async_delete_message]
-                :language: python
-                :dedent: 16
-                :caption: Delete a message.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            message_id = message.id
-            receipt = pop_receipt or message.pop_receipt
-        except AttributeError:
-            message_id = message
-            receipt = pop_receipt
-
-        if receipt is None:
-            raise ValueError("pop_receipt must be present")
-        try:
-            await self._client.message_id.delete(
-                pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs
-            )
-        except StorageErrorException as error:
-            process_storage_error(error)
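Pulling the delete_message contract together: deletion needs a matching
(id, pop_receipt) pair, so receive-process-delete is the canonical consume
loop. A sketch, again with hypothetical connection details:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import QueueClient

    async def main():
        queue = QueueClient("https://myaccount.queue.core.windows.net",
                            queue_name="tasks", credential="<sas-token>")
        async for msg in queue.receive_messages(messages_per_page=10):
            print("processing", msg.content)  # application work goes here
            # The QueueMessage carries both id and the latest pop_receipt,
            # so the object can be passed through unchanged.
            await queue.delete_message(msg)

    asyncio.run(main())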
diff -pruN 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py
--- 1.4.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,384 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import functools
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List,
-    TYPE_CHECKING)
-try:
-    from urllib.parse import urlparse # pylint: disable=unused-import
-except ImportError:
-    from urlparse import urlparse # type: ignore
-
-from azure.core.async_paging import AsyncItemPaged
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.pipeline import AsyncPipeline
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-from .._shared.policies_async import ExponentialRetry
-from .._queue_service_client import QueueServiceClient as QueueServiceClientBase
-from .._shared.models import LocationMode
-from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
-from .._shared.response_handlers import process_storage_error
-from .._generated.version import VERSION
-from .._generated.aio import AzureQueueStorage
-from .._generated.models import StorageServiceProperties, StorageErrorException
-
-from ._models import QueuePropertiesPaged
-from ._queue_client_async import QueueClient
-from .._models import (
-    service_stats_deserialize,
-    service_properties_deserialize,
-)
-
-if TYPE_CHECKING:
-    from datetime import datetime
-    from azure.core.configuration import Configuration
-    from azure.core.pipeline.policies import HTTPPolicy
-    from .._models import (
-        QueueProperties,
-        QueueAnalyticsLogging,
-        Metrics,
-        CorsRule,
-    )
-
-
-class QueueServiceClient(AsyncStorageAccountHostsMixin, QueueServiceClientBase):
-    """A client to interact with the Queue Service at the account level.
-
-    This client provides operations to retrieve and configure the account properties
-    as well as list, create and delete queues within the account.
-    For operations relating to a specific queue, a client for this entity
-    can be retrieved using the :func:`~get_queue_client` function.
-
-    :param str account_url:
-        The URL to the queue service endpoint. Any other entities included
-        in the URL path (e.g. queue) will be discarded. This URL can be optionally
-        authenticated with a SAS token.
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/queue_samples_authentication_async.py
-            :start-after: [START async_create_queue_service_client]
-            :end-before: [END async_create_queue_service_client]
-            :language: python
-            :dedent: 8
-            :caption: Creating the QueueServiceClient with an account url and credential.
-
-        .. literalinclude:: ../samples/queue_samples_authentication_async.py
-            :start-after: [START async_create_queue_service_client_token]
-            :end-before: [END async_create_queue_service_client_token]
-            :language: python
-            :dedent: 8
-            :caption: Creating the QueueServiceClient with Azure Identity credentials.
-    """
-
-    def __init__(
-            self, account_url,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(QueueServiceClient, self).__init__( # type: ignore
-            account_url,
-            credential=credential,
-            loop=loop,
-            **kwargs)
-        self._client = AzureQueueStorage(url=self.url, pipeline=self._pipeline, loop=loop) # type: ignore
-        self._client._config.version = kwargs.get('api_version', VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
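A minimal construction sketch for this client, using a hypothetical
account; api_version pins the wire protocol (already '2019-07-07' here by
default):

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    # A SAS token embedded in the URL would make credential unnecessary.
    service = QueueServiceClient(
        "https://myaccount.queue.core.windows.net",
        credential="<account-key-or-sas-token>",
        api_version="2019-07-07",  # optional; matches the default
    )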
-    @distributed_trace_async
-    async def get_service_stats(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Retrieves statistics related to replication for the Queue service.
-
-        It is only available when read-access geo-redundant replication is enabled for
-        the storage account.
-
-        With geo-redundant replication, Azure Storage maintains your data durably
-        in two locations. In both locations, Azure Storage constantly maintains
-        multiple healthy replicas of your data. The location where you read,
-        create, update, or delete data is the primary storage account location.
-        The primary location exists in the region you choose at the time you
-        create an account via the Azure portal, for example, North Central US.
-        The location to which your data is replicated
-        is the secondary location. The secondary location is automatically
-        determined based on the location of the primary; it is in a second data
-        center that resides in the same region as the primary location. Read-only
-        access is available from the secondary location, if read-access geo-redundant
-        replication is enabled for your storage account.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: The queue service stats.
-        :rtype: Dict[str, Any]
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            stats = await self._client.service.get_statistics( # type: ignore
-                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
-            return service_stats_deserialize(stats)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
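Since the statistics are read from the secondary endpoint (note
use_location=LocationMode.SECONDARY above), the call only succeeds on
accounts with read-access geo-redundant replication. A usage sketch,
assuming the deserialized dict mirrors the v12-style
geo_replication/status/last_sync_time shape:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    async def main():
        service = QueueServiceClient(
            "https://myaccount.queue.core.windows.net",
            credential="<account-key>")  # hypothetical
        stats = await service.get_service_stats()
        # Assumed key layout, per the service_stats_deserialize helper.
        print(stats["geo_replication"]["status"],
              stats["geo_replication"]["last_sync_time"])

    asyncio.run(main())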
-    @distributed_trace_async
-    async def get_service_properties(self, **kwargs):
-        # type: (Optional[Any]) -> Dict[str, Any]
-        """Gets the properties of a storage account's Queue service, including
-        Azure Storage Analytics.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An object containing queue service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_get_queue_service_properties]
-                :end-before: [END async_get_queue_service_properties]
-                :language: python
-                :dedent: 12
-                :caption: Getting queue service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        try:
-            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore
-            return service_properties_deserialize(service_props)
-        except StorageErrorException as error:
-            process_storage_error(error)
-
-    @distributed_trace_async
-    async def set_service_properties( # type: ignore
-            self, analytics_logging=None,  # type: Optional[QueueAnalyticsLogging]
-            hour_metrics=None,  # type: Optional[Metrics]
-            minute_metrics=None,  # type: Optional[Metrics]
-            cors=None,  # type: Optional[List[CorsRule]]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Sets the properties of a storage account's Queue service, including
-        Azure Storage Analytics.
-
-        If an element (e.g. analytics_logging) is left as None, the
-        existing settings on the service for that functionality are preserved.
-
-        :param analytics_logging:
-            Groups the Azure Analytics Logging settings.
-        :type analytics_logging: ~azure.storage.queue.QueueAnalyticsLogging
-        :param hour_metrics:
-            The hour metrics settings provide a summary of request
-            statistics grouped by API in hourly aggregates for queues.
-        :type hour_metrics: ~azure.storage.queue.Metrics
-        :param minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute for queues.
-        :type minute_metrics: ~azure.storage.queue.Metrics
-        :param cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list(~azure.storage.queue.CorsRule)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_set_queue_service_properties]
-                :end-before: [END async_set_queue_service_properties]
-                :language: python
-                :dedent: 12
-                :caption: Setting queue service properties.
-        """
-        timeout = kwargs.pop('timeout', None)
-        props = StorageServiceProperties(
-            logging=analytics_logging,
-            hour_metrics=hour_metrics,
-            minute_metrics=minute_metrics,
-            cors=cors
-        )
-        try:
-            return await self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore
-        except StorageErrorException as error:
-            process_storage_error(error)
-
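Because None-valued parameters preserve the service's existing settings,
CORS can be updated in isolation. A sketch, assuming this package
re-exports the CorsRule model the same way azure.storage.queue does:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07 import CorsRule
    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    async def main():
        service = QueueServiceClient(
            "https://myaccount.queue.core.windows.net",
            credential="<account-key>")  # hypothetical
        cors = [CorsRule(allowed_origins=["https://contoso.example"],
                         allowed_methods=["GET", "PUT"],
                         max_age_in_seconds=3600)]
        # Logging and metrics are left as None, so the service keeps
        # its current values for those settings.
        await service.set_service_properties(cors=cors)

    asyncio.run(main())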
-    @distributed_trace
-    def list_queues(
-            self, name_starts_with=None,  # type: Optional[str]
-            include_metadata=False,  # type: Optional[bool]
-            **kwargs
-        ):  # type: (...) -> AsyncItemPaged
-        """Returns a generator to list the queues under the specified account.
-
-        The generator will lazily follow the continuation tokens returned by
-        the service and stop when all queues have been returned.
-
-        :param str name_starts_with:
-            Filters the results to return only queues whose names
-            begin with the specified prefix.
-        :param bool include_metadata:
-            Specifies that queue metadata be returned in the response.
-        :keyword int results_per_page:
-            The maximum number of queue names to retrieve per API
-            call. If the request does not specify a value, the server will return up to 5,000 items.
-        :keyword int timeout:
-            The server timeout, expressed in seconds. This function may make multiple
-            calls to the service in which case the timeout value specified will be
-            applied to each individual call.
-        :returns: An iterable (auto-paging) of QueueProperties.
-        :rtype: ~azure.core.paging.AsyncItemPaged[~azure.storage.queue.QueueProperties]
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_qsc_list_queues]
-                :end-before: [END async_qsc_list_queues]
-                :language: python
-                :dedent: 16
-                :caption: List queues in the service.
-        """
-        results_per_page = kwargs.pop('results_per_page', None)
-        timeout = kwargs.pop('timeout', None)
-        include = ['metadata'] if include_metadata else None
-        command = functools.partial(
-            self._client.service.list_queues_segment,
-            prefix=name_starts_with,
-            include=include,
-            timeout=timeout,
-            **kwargs)
-        return AsyncItemPaged(
-            command, prefix=name_starts_with, results_per_page=results_per_page,
-            page_iterator_class=QueuePropertiesPaged
-        )
-
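The returned AsyncItemPaged follows continuation tokens transparently, so
callers simply iterate. Sketch, under the same hypothetical account:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    async def main():
        service = QueueServiceClient(
            "https://myaccount.queue.core.windows.net",
            credential="<account-key>")  # hypothetical
        # Lazily pages through every queue whose name starts with "job-".
        async for queue in service.list_queues(name_starts_with="job-",
                                               include_metadata=True):
            print(queue.name, queue.metadata)

    asyncio.run(main())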
-    @distributed_trace_async
-    async def create_queue( # type: ignore
-            self, name,  # type: str
-            metadata=None,  # type: Optional[Dict[str, str]]
-            **kwargs
-        ):
-        # type: (...) -> QueueClient
-        """Creates a new queue under the specified account.
-
-        If a queue with the same name already exists, the operation fails.
-        Returns a client with which to interact with the newly created queue.
-
-        :param str name: The name of the queue to create.
-        :param metadata:
-            A dict with name-value pairs to associate with the
-            queue as metadata. Example: {'Category': 'test'}
-        :type metadata: dict(str, str)
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: ~azure.storage.queue.aio.QueueClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_qsc_create_queue]
-                :end-before: [END async_qsc_create_queue]
-                :language: python
-                :dedent: 12
-                :caption: Create a queue in the service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        queue = self.get_queue_client(name)
-        kwargs.setdefault('merge_span', True)
-        await queue.create_queue(
-            metadata=metadata, timeout=timeout, **kwargs)
-        return queue
-
-    @distributed_trace_async
-    async def delete_queue( # type: ignore
-            self, queue,  # type: Union[QueueProperties, str]
-            **kwargs
-        ):
-        # type: (...) -> None
-        """Deletes the specified queue and any messages it contains.
-
-        When a queue is successfully deleted, it is immediately marked for deletion
-        and is no longer accessible to clients. The queue is later removed from
-        the Queue service during garbage collection.
-
-        Note that deleting a queue is likely to take at least 40 seconds to complete.
-        If an operation is attempted against the queue while it is being deleted,
-        an :class:`HttpResponseError` will be thrown.
-
-        :param queue:
-            The queue to delete. This can either be the name of the queue,
-            or an instance of QueueProperties.
-        :type queue: str or ~azure.storage.queue.QueueProperties
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_qsc_delete_queue]
-                :end-before: [END async_qsc_delete_queue]
-                :language: python
-                :dedent: 16
-                :caption: Delete a queue in the service.
-        """
-        timeout = kwargs.pop('timeout', None)
-        queue_client = self.get_queue_client(queue)
-        kwargs.setdefault('merge_span', True)
-        await queue_client.delete_queue(timeout=timeout, **kwargs)
-
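Since create_queue returns a ready QueueClient and delete_queue accepts a
name or QueueProperties, a create/use/delete round trip is compact. Sketch
with a hypothetical queue name:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    async def main():
        service = QueueServiceClient(
            "https://myaccount.queue.core.windows.net",
            credential="<account-key>")  # hypothetical
        queue = await service.create_queue("scratch",
                                           metadata={"Category": "test"})
        print(queue.queue_name)  # a usable QueueClient, not just a name
        # Takes a name (or QueueProperties); the service may need ~40s to
        # finish removing the queue in the background.
        await service.delete_queue("scratch")

    asyncio.run(main())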
-    def get_queue_client(self, queue, **kwargs):
-        # type: (Union[QueueProperties, str], Optional[Any]) -> QueueClient
-        """Get a client to interact with the specified queue.
-
-        The queue need not already exist.
-
-        :param queue:
-            The queue. This can either be the name of the queue,
-            or an instance of QueueProperties.
-        :type queue: str or ~azure.storage.queue.QueueProperties
-        :returns: A :class:`~azure.storage.queue.aio.QueueClient` object.
-        :rtype: ~azure.storage.queue.aio.QueueClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/queue_samples_service_async.py
-                :start-after: [START async_get_queue_client]
-                :end-before: [END async_get_queue_client]
-                :language: python
-                :dedent: 8
-                :caption: Get the queue client.
-        """
-        try:
-            queue_name = queue.name
-        except AttributeError:
-            queue_name = queue
-
-        _pipeline = AsyncPipeline(
-            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies # pylint: disable = protected-access
-        )
-
-        return QueueClient(
-            self.url, queue_name=queue_name, credential=self.credential,
-            key_resolver_function=self.key_resolver_function, require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key, api_version=self.api_version, _pipeline=_pipeline,
-            _configuration=self._config, _location_mode=self._location_mode,
-            _hosts=self._hosts, loop=self._loop, **kwargs)
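get_queue_client shares the parent pipeline (wrapped in
AsyncTransportWrapper so the child client cannot close the transport),
which makes per-queue clients cheap to construct. Sketch:

    import asyncio

    from azure.multiapi.storagev2.queue.v2019_07_07.aio import (
        QueueServiceClient,
    )

    async def main():
        service = QueueServiceClient(
            "https://myaccount.queue.core.windows.net",
            credential="<account-key>")  # hypothetical
        # The queue need not exist yet; this only builds a client that
        # reuses the service client's transport and policies.
        queue = service.get_queue_client("tasks")
        await queue.create_queue()

    asyncio.run(main())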
diff -pruN 1.4.0-1/azure-pipeline.yml 1.5.0-1/azure-pipeline.yml
--- 1.4.0-1/azure-pipeline.yml	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/azure-pipeline.yml	2025-06-18 05:27:42.000000000 +0000
@@ -17,15 +17,17 @@ jobs:
     timeoutInMinutes: 10
 
     pool:
-      name: 'pool-ubuntu-2004'
+      name: 'pool-ubuntu-2204'
     strategy:
       matrix:
-        Python37:
-          python.version: '3.7'
-        Python38:
-          python.version: '3.8'
+        Python39:
+          python.version: '3.9'
         Python310:
           python.version: '3.10'
+        Python311:
+          python.version: '3.11'
+        Python312:
+          python.version: '3.12'
     steps:
       - task: UsePythonVersion@0
         displayName: 'Use Python $(python.version)'
@@ -40,12 +42,12 @@ jobs:
   - job: BuildPythonWheel
     condition: succeeded()
     pool:
-      name: 'pool-ubuntu-2004'
+      name: 'pool-ubuntu-2204'
     steps:
       - task: UsePythonVersion@0
-        displayName: Use Python 3.7
+        displayName: Use Python 3.12
         inputs:
-          versionSpec: 3.7
+          versionSpec: 3.12
       - bash: |
           set -ev
 
diff -pruN 1.4.0-1/debian/changelog 1.5.0-1/debian/changelog
--- 1.4.0-1/debian/changelog	2025-03-19 13:01:01.000000000 +0000
+++ 1.5.0-1/debian/changelog	2025-08-10 10:24:29.000000000 +0000
@@ -1,3 +1,12 @@
+azure-multiapi-storage-python (1.5.0-1) unstable; urgency=medium
+
+  * Update upstream source from tag 'upstream/1.5.0'
+  * autopkgtest: update import name
+  * Update d/copyright
+  * Drop source lintian override, no longer needed
+
+ -- Luca Boccassi <bluca@debian.org>  Sun, 10 Aug 2025 11:24:29 +0100
+
 azure-multiapi-storage-python (1.4.0-1) unstable; urgency=medium
 
   * Update upstream source from tag 'upstream/1.4.0'
diff -pruN 1.4.0-1/debian/copyright 1.5.0-1/debian/copyright
--- 1.4.0-1/debian/copyright	2023-01-12 11:28:07.000000000 +0000
+++ 1.5.0-1/debian/copyright	2025-08-10 10:24:29.000000000 +0000
@@ -4,15 +4,6 @@ Files: *
 Copyright: 2017-2023 Microsoft Corporation
 License: Expat
 
-Files: azure/multiapi/cosmosdb/*
-       azure/multiapi/storage/v2015_04_05/*
-       azure/multiapi/storage/v2017_04_17/*
-       azure/multiapi/storage/v2017_11_09/__init__.py
-       azure/multiapi/storage/v2018_11_09/__init__.py
-       setup.py
-Copyright: 2017-2023 Microsoft Corporation
-License: Apache-2.0
-
 Files: debian/*
 Copyright: 2020-2023 Luca Boccassi <luca.boccassi@microsoft.com>
 License: Expat
@@ -35,22 +26,3 @@ License: Expat
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE
-
-License: Apache-2.0
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
- .
-      https://www.apache.org/licenses/LICENSE-2.0
- .
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- .
- On Debian systems, the full text of the Apache Software License version 2 can
- be found in the file `/usr/share/common-licenses/Apache-2.0'.
diff -pruN 1.4.0-1/debian/source/lintian-overrides 1.5.0-1/debian/source/lintian-overrides
--- 1.4.0-1/debian/source/lintian-overrides	2023-01-12 10:32:54.000000000 +0000
+++ 1.5.0-1/debian/source/lintian-overrides	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-azure-multiapi-storage-python source: very-long-line-length-in-source-file
diff -pruN 1.4.0-1/debian/tests/pkg-python/import-name 1.5.0-1/debian/tests/pkg-python/import-name
--- 1.4.0-1/debian/tests/pkg-python/import-name	2023-01-12 10:32:54.000000000 +0000
+++ 1.5.0-1/debian/tests/pkg-python/import-name	2025-08-10 10:24:29.000000000 +0000
@@ -1 +1 @@
-azure.multiapi.storage
+azure.multiapi.storagev2
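The updated autopkgtest simply verifies that the new top-level namespace
imports cleanly; an equivalent hand-run check would be:

    # The track1 'azure.multiapi.storage' tree is gone in 1.5.0, so the
    # importable namespace is now 'azure.multiapi.storagev2'.
    import importlib

    mod = importlib.import_module("azure.multiapi.storagev2")
    print(mod.__name__, "imports OK")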
diff -pruN 1.4.0-1/setup.py 1.5.0-1/setup.py
--- 1.4.0-1/setup.py	2025-03-17 06:32:34.000000000 +0000
+++ 1.5.0-1/setup.py	2025-06-18 05:27:42.000000000 +0000
@@ -1,19 +1,9 @@
 ﻿#!/usr/bin/env python
 
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
 
 from setuptools import find_packages, setup
 import sys
@@ -35,7 +25,7 @@ except ImportError:
 
 setup(
     name='azure-multiapi-storage',
-    version='1.4.0',
+    version='1.5.0',
     description='Microsoft Azure Storage Client Library for Python with multi API version support.',
     long_description=open('README.rst', 'r').read(),
     license='MIT',
@@ -43,18 +33,15 @@ setup(
     author_email='azpycli@microsoft.com',
     url='https://github.com/Azure/azure-multiapi-storage-python',
     classifiers=[
-        'Development Status :: 4 - Beta',
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
         'Programming Language :: Python',
-        'Programming Language :: Python :: 2',
-        'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.3',
-        'Programming Language :: Python :: 3.4',
-        'Programming Language :: Python :: 3.5',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'License :: OSI Approved :: Apache Software License',
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Python :: 3.11',
+        'Programming Language :: Python :: 3.12',
+        'License :: OSI Approved :: MIT License',
     ],
     zip_safe=False,
     packages=find_packages(exclude=["azure"]),
@@ -66,8 +53,5 @@ setup(
         "msrest>=0.6.18",
         "cryptography>=2.1.4"
     ],
-    extras_require={
-        ':python_version=="2.7"': ['futures'],
-        ':python_version<"3.0"': ['azure-nspkg'],
-    },
+    python_requires=">=3.9.0"
 )
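With 1.5.0 declaring python_requires>=3.9.0, a quick runtime check of the
installed distribution (stdlib importlib.metadata) might look like:

    from importlib.metadata import metadata, version

    # Reads the wheel metadata generated from this setup.py.
    print(version("azure-multiapi-storage"))                      # "1.5.0"
    print(metadata("azure-multiapi-storage")["Requires-Python"])  # ">=3.9.0"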
