diff --git a/src/build.bat b/src/build.bat
new file mode 100644
index 000000000000..17d39bcde4af
--- /dev/null
+++ b/src/build.bat
@@ -0,0 +1,16 @@
+@echo OFF
+REM----------------------------------------------------------------------------
+REM Copyright (c) Microsoft Corporation.
+REM
+REM This source code is subject to terms and conditions of the Apache License,
+REM Version 2.0. A copy of the license can be found in the License.html file at
+REM the root of this distribution. If you cannot locate the Apache License,
+REM Version 2.0, please send an email to vspython@microsoft.com. By using this
+REM source code in any fashion, you are agreeing to be bound by the terms of the
+REM Apache License, Version 2.0.
+REM
+REM You must not remove this notice, or any other, from this software.
+REM----------------------------------------------------------------------------
+cls
+
+%SystemDrive%\Python27\python.exe setup.py sdist
\ No newline at end of file
diff --git a/src/install.bat b/src/install.bat
new file mode 100644
index 000000000000..f0a169369c8b
--- /dev/null
+++ b/src/install.bat
@@ -0,0 +1,16 @@
+@echo OFF
+REM----------------------------------------------------------------------------
+REM Copyright (c) Microsoft Corporation.
+REM
+REM This source code is subject to terms and conditions of the Apache License,
+REM Version 2.0. A copy of the license can be found in the License.html file at
+REM the root of this distribution. If you cannot locate the Apache License,
+REM Version 2.0, please send an email to vspython@microsoft.com. By using this
+REM source code in any fashion, you are agreeing to be bound by the terms of the
+REM Apache License, Version 2.0.
+REM
+REM You must not remove this notice, or any other, from this software.
+REM----------------------------------------------------------------------------
+cls
+
+%SystemDrive%\Python27\python.exe setup.py install
\ No newline at end of file
diff --git a/src/installfrompip.bat b/src/installfrompip.bat
new file mode 100644
index 000000000000..ce8b64850161
--- /dev/null
+++ b/src/installfrompip.bat
@@ -0,0 +1,16 @@
+@echo OFF
+REM----------------------------------------------------------------------------
+REM Copyright (c) Microsoft Corporation.
+REM
+REM This source code is subject to terms and conditions of the Apache License,
+REM Version 2.0. A copy of the license can be found in the License.html file at
+REM the root of this distribution. If you cannot locate the Apache License,
+REM Version 2.0, please send an email to vspython@microsoft.com. By using this
+REM source code in any fashion, you are agreeing to be bound by the terms of the
+REM Apache License, Version 2.0.
+REM
+REM You must not remove this notice, or any other, from this software.
+REM----------------------------------------------------------------------------
+cls
+
+%SystemDrive%\Python27\Scripts\pip.exe install windowsazure --upgrade
\ No newline at end of file
diff --git a/src/setup.py b/src/setup.py
new file mode 100644
index 000000000000..1f3967691693
--- /dev/null
+++ b/src/setup.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+from distutils.core import setup
+
+setup(name='windowsazure',
+ version='0.2.2',
+ description='Windows Azure client APIs',
+ url='https://github.com/WindowsAzure/azure-sdk-for-python',
+ packages=['windowsazure',
+ 'windowsazure.http',
+ 'windowsazure.servicebus',
+ 'windowsazure.storage']
+ )
diff --git a/src/upload.bat b/src/upload.bat
new file mode 100644
index 000000000000..3e953e29013a
--- /dev/null
+++ b/src/upload.bat
@@ -0,0 +1,18 @@
+@echo OFF
+REM----------------------------------------------------------------------------
+REM Copyright (c) Microsoft Corporation.
+REM
+REM This source code is subject to terms and conditions of the Apache License,
+REM Version 2.0. A copy of the license can be found in the License.html file at
+REM the root of this distribution. If you cannot locate the Apache License,
+REM Version 2.0, please send an email to vspython@microsoft.com. By using this
+REM source code in any fashion, you are agreeing to be bound by the terms of the
+REM Apache License, Version 2.0.
+REM
+REM You must not remove this notice, or any other, from this software.
+REM----------------------------------------------------------------------------
+cls
+
+REM %SystemDrive%\Python27\python.exe setup.py register
+
+%SystemDrive%\Python27\python.exe setup.py sdist upload
\ No newline at end of file
diff --git a/src/windowsazure.pyproj b/src/windowsazure.pyproj
new file mode 100644
index 000000000000..08df07591d93
--- /dev/null
+++ b/src/windowsazure.pyproj
@@ -0,0 +1,55 @@
+
+
+
+ Debug
+ 2.0
+ {e60b72bf-ac42-4615-b1e7-57cb627260ae}
+ .
+
+
+
+
+ .
+ .
+ windowsazure
+ windowsazure
+
+
+ true
+ false
+
+
+ true
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/windowsazure.sln b/src/windowsazure.sln
new file mode 100644
index 000000000000..bc8447cff9e1
--- /dev/null
+++ b/src/windowsazure.sln
@@ -0,0 +1,22 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "windowsazure", "windowsazure.pyproj", "{E60B72BF-AC42-4615-B1E7-57CB627260AE}"
+EndProject
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "windowsazuretest", "..\test\windowsazuretest.pyproj", "{C0742A2D-4862-40E4-8A28-036EECDBC614}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {E60B72BF-AC42-4615-B1E7-57CB627260AE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E60B72BF-AC42-4615-B1E7-57CB627260AE}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C0742A2D-4862-40E4-8A28-036EECDBC614}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C0742A2D-4862-40E4-8A28-036EECDBC614}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/src/windowsazure.vssscc b/src/windowsazure.vssscc
new file mode 100644
index 000000000000..6cb031bcf512
--- /dev/null
+++ b/src/windowsazure.vssscc
@@ -0,0 +1,10 @@
+""
+{
+"FILE_VERSION" = "9237"
+"ENLISTMENT_CHOICE" = "NEVER"
+"PROJECT_FILE_RELATIVE_PATH" = ""
+"NUMBER_OF_EXCLUDED_FILES" = "0"
+"ORIGINAL_PROJECT_FILE_PATH" = ""
+"NUMBER_OF_NESTED_PROJECTS" = "0"
+"SOURCE_CONTROL_SETTINGS_PROVIDER" = "PROJECT"
+}
diff --git a/src/windowsazure/__init__.py b/src/windowsazure/__init__.py
new file mode 100644
index 000000000000..1eccf387ad1b
--- /dev/null
+++ b/src/windowsazure/__init__.py
@@ -0,0 +1,507 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import types
+from datetime import datetime
+from xml.dom import minidom
+import base64
+import urllib2
+import ast
+
+BLOB_SERVICE = 'blob'
+QUEUE_SERVICE = 'queue'
+TABLE_SERVICE = 'table'
+SERVICE_BUS_SERVICE = 'service_bus'
+
+BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
+QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
+TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
+SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
+
+DEV_BLOB_HOST = '127.0.0.1:10000'
+DEV_QUEUE_HOST = '127.0.0.1:10001'
+DEV_TABLE_HOST = '127.0.0.1:10002'
+
+DEV_ACCOUNT_NAME = 'devstoreaccount1'
+DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
+
+class WindowsAzureData(object):
+ pass
+
+class ResponseHeader:
+ pass
+
+class ResponseError:
+ def __init__(self):
+ self.code = ''
+ self.message = ''
+
+class _Request:
+ def __init__(self):
+ self.host = ''
+ self.method = ''
+ self.uri = ''
+ self.query = []
+ self.header = []
+ self.body = ''
+ self.fail_on_exist = True
+ self.fail_not_exist = True
+
+class HTTPError(Exception):
+ def __init__(self, status, message, respheader, respbody):
+ self.message = message
+ self.status = status
+ self.respheader = respheader
+ self.respbody = respbody
+
+class WindowsAzureError(Exception):
+ def __init__(self, message):
+ self.message = message
+
+class Feed:
+ def __init__(self, type):
+ self.type = type
+
+class _Base64String(str):
+ pass
+
+def get_entry_properties(xmlstr, properties_name):
+ xmldoc = minidom.parseString(xmlstr)
+ properties = {}
+ for property_name in properties_name:
+ xml_properties = xmldoc.getElementsByTagName(property_name)
+ if xml_properties:
+ xml_property = xml_properties[0]
+ if xml_property.firstChild:
+ if property_name == 'name':
+ properties['author'] = xml_property.firstChild.nodeValue
+ elif property_name == 'id':
+ pos = xml_property.firstChild.nodeValue.rfind('/')
+ if pos != -1:
+ properties['name'] = xml_property.firstChild.nodeValue[pos+1:]
+ else:
+ properties['name'] = xml_property.firstChild.nodeValue
+ else:
+ properties[xml_property.nodeName] = xml_property.firstChild.nodeValue
+ return properties
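+
+# Illustrative example (assuming an Atom entry of roughly this shape):
+#   <entry><id>https://myns.servicebus.windows.net/myqueue</id>
+#     <updated>2012-03-01T00:00:00Z</updated><author><name>myns</name></author></entry>
+# get_entry_properties(xmlstr, ['id', 'updated', 'name']) returns
+#   {'name': 'myqueue', 'updated': '2012-03-01T00:00:00Z', 'author': 'myns'}
+# i.e. the trailing segment of <id> becomes 'name', and the author's <name> becomes 'author'.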
+
+def create_entry(entry_body):
+ updated_str = datetime.utcnow().isoformat()
+ if datetime.utcnow().utcoffset() is None:
+ updated_str += '+00:00'
+
+ entry_start = '''
+
+
+{updated}
+
+ '''
+ entry_start = entry_start.format(updated=updated_str)
+ entry_end = ''
+
+ return entry_start + entry_body + entry_end
+
+def to_datetime(strtime):
+ values = strtime.split('T')
+ return datetime.strptime(values[0] + ' ' + values[1].split(' ')[1], '%Y-%m-%d %H:%M:%S.%f')
+
+def capitalize_words(element_name):
+ if element_name == 'include_apis':
+ return 'IncludeAPIs'
+ if element_name == 'message_id':
+ return 'MessageId'
+ if element_name == 'content_md5':
+ return 'Content-MD5'
+ elif element_name.startswith('x_ms_'):
+ return element_name.replace('_', '-')
+ if element_name.endswith('_id'):
+ element_name = element_name.replace('_id', 'ID')
+ for name in ['content_', 'last_modified', 'if_', 'cache_control']:
+ if element_name.startswith(name):
+ element_name = element_name.replace('_', '-_')
+ return ''.join(name.capitalize() for name in element_name.split('_'))
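+
+# For example:
+#   capitalize_words('last_modified')  -> 'Last-Modified'
+#   capitalize_words('x_ms_blob_type') -> 'x-ms-blob-type'
+#   capitalize_words('blob_type')      -> 'BlobType'
+#   capitalize_words('include_apis')   -> 'IncludeAPIs'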
+
+def to_right_type(value):
+ if value is None or isinstance(value, dict):
+ return value
+ return str(value)
+
+def to_legalname(name):
+ if name == 'IncludeAPIs':
+ return 'include_apis'
+ name = name.split('=')[0]
+ if ':' in name:
+ name = name.split(':')[1]
+ name = name.replace('-', '_')
+ legalname = name[0]
+ for ch in name[1:]:
+ if ch.isupper():
+ legalname += '_'
+ legalname += ch
+ legalname = legalname.replace('__', '_').replace('_m_d5', '_md5')
+ return legalname.lower()
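+
+# For example, to_legalname is roughly the inverse of capitalize_words:
+#   to_legalname('Content-Type')    -> 'content_type'
+#   to_legalname('x-ms-request-id') -> 'x_ms_request_id'
+#   to_legalname('IncludeAPIs')     -> 'include_apis'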
+
+def normalize_xml(xmlstr):
+ if xmlstr:
+ xmlstr = '>'.join(string.strip() for string in xmlstr.split('>'))
+ xmlstr = '<'.join(string.strip() for string in xmlstr.split('<'))
+ return xmlstr
+
+def remove_tag_namespace(name, to_lower=False):
+ new_name = name
+ if new_name.startswith('m:') or new_name.startswith('d:') or new_name.startswith('i:'):
+ new_name = new_name[2:]
+ elif new_name.startswith('')
+ tag_str = xmlstr[pos1:pos2+1]
+ non_tag_str = xmlstr[:pos1]
+ xmlstr = xmlstr[pos2+1:]
+ lower_xmlstr += non_tag_str
+ tag_items = tag_str.strip().split(' ')
+ new_tag = ''
+ for tag_item in tag_items:
+ if tag_item:
+ if '=' in tag_item:
+ pos3 = tag_item.find('=')
+ name = tag_item[:pos3]
+ value = tag_item[pos3+1]
+ new_name = remove_tag_namespace(name, to_lower)
+ tag_item = tag_item.replace(name + '=', new_name + '=')
+ else:
+ tag_item = remove_tag_namespace(tag_item, to_lower)
+ new_tag += tag_item + ' '
+ lower_xmlstr += new_tag.strip()
+
+ if not lower_xmlstr:
+ return xmlstr
+
+ return lower_xmlstr
+
+def convert_class_to_xml(source):
+ xmlstr = ''
+ if isinstance(source, list):
+ for value in source:
+ xmlstr += convert_class_to_xml(value)
+ elif type(source) is types.InstanceType or isinstance(source, WindowsAzureData):
+ class_name = source.__class__.__name__
+ xmlstr += '<' + class_name
+ if 'attributes' in dir(source):
+ attributes = getattr(source, 'attributes')
+ for name, value in attributes:
+ xmlstr += ' ' + name + '="' + value + '"'
+ xmlstr += '>'
+ for name, value in vars(source).iteritems():
+ if value is not None:
+ if isinstance(value, list) or type(value) is types.InstanceType or isinstance(value, WindowsAzureData):
+ xmlstr += convert_class_to_xml(value)
+ else:
+ xmlstr += '<' + capitalize_words(name) + '>' + str(value) + '</' + capitalize_words(name) + '>'
+ xmlstr += '</' + class_name + '>'
+ return xmlstr
+
+def convert_xml_to_feeds(xmlstr, convert_func):
+ feeds = []
+ xmldoc = minidom.parseString(xmlstr)
+ xml_entries = xmldoc.getElementsByTagName('entry')
+ for xml_entry in xml_entries:
+ feeds.append(convert_func(xml_entry.toxml()))
+ return feeds
+
+def validate_not_none(param_name, param):
+ if param is None:
+ raise ValueError('invalid_value: ', '%s should not be None.' % (param_name))
+
+def validate_length(param_name, param, valid_range):
+ valid_range = str(valid_range)
+ left = valid_range[0]
+ right = valid_range[-1]
+
+ if left not in ('[','(') or right not in (']',')'):
+ raise ValueError('invalid_value_range_format: ', '%s has invalid range format: %s. Length format should be like [1,3] or (1, 3).' % (param_name, valid_range))
+ try:
+ left_value, right_value = valid_range[1:-1].split(',')
+ left_value = int(left_value.strip())
+ right_value = int(right_value.strip())
+ except ValueError:
+ raise ValueError('invalid_value_range_format: ', '%s has invalid length range. The length bounds should be integers.' % (param_name))
+ if left == '[' and len(param) < left_value or left == '(' and len(param) <= left_value or right == ']' and len(param) > right_value or right == ')' and len(param) >= right_value:
+ raise ValueError('invalid_value: ', '%s should be in range %s.' % (param_name, valid_range))
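+
+# For example, a (hypothetical) call validate_length('queue_name', name, '[3,63]') requires
+# 3 <= len(name) <= 63, while a range of '(3,63)' would make both bounds exclusive.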
+
+def validate_values(param_name, param, valid_values):
+ if param not in valid_values:
+ raise ValueError('invalid_value: ', '%s has invalid value. Allowed values are: %s.' % (param_name, str(valid_values)))
+
+def html_encode(html):
+ ch_map = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;'), ('\'', '&apos;'))
+ for name, value in ch_map:
+ html = html.replace(name, value)
+ return html
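+
+# For example, html_encode('x < 1 & y') returns 'x &lt; 1 &amp; y'; '&' is mapped first so the
+# ampersands introduced by the other entities are not escaped a second time.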
+
+
+
+ #move to some class
+def fill_list(xmlstr, parent_node_name, module_name):
+ '''
+ Extracts the values of the child nodes of parent_node_name from xmlstr, adds them to a list
+ and returns that list. parent_node_name is the parent; the child node name is derived from it
+ using the following rules:
+ (1) change a trailing 'ies' to 'y': properties -> property
+ (2) remove a trailing 'List': InputEndpointList -> InputEndpoint
+ (3) remove a trailing 's': deployments -> deployment
+ (4) otherwise, use parent_node_name itself as the child node name
+ module_name is used to look up a predefined class with the child node's name, which decides
+ whether this is a list of class instances or of base-type values.
+ '''
+
+ xmldoc = minidom.parseString(xmlstr)
+
+ child_node_name = parent_node_name
+
+ xmlelements = None
+ if parent_node_name.endswith('ies'):
+ child_node_name = parent_node_name[:-3] + 'y'
+ elif parent_node_name.endswith('List'):
+ child_node_name = parent_node_name.replace('List', '')
+ elif parent_node_name.endswith('s'):
+ child_node_name = parent_node_name[:-1]
+ else:
+ child_node_name = parent_node_name
+ xmlelements = xmldoc.getElementsByTagName(capitalize_words(child_node_name))
+
+ if not xmlelements:
+ return []
+
+ return_list = []
+ for xmlelement in xmlelements:
+ from_list = '.'.join(module_name.split('.')[:-1])
+ _module = __import__(module_name, fromlist=from_list)
+ new_child_node_name = capitalize_words(child_node_name)
+ if new_child_node_name in dir(_module):
+ return_list.append(_parse_response(xmlelement.toxml(), getattr(_module, new_child_node_name)))
+ else:
+ return_list.append(xmlelement.nodeValue)
+
+ return return_list
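+
+# For example, a parent node name of 'queues' looks for <Queue> child elements and
+# 'properties' looks for <Property>; when the capitalized child name matches a class defined
+# in module_name, each element is parsed into an instance of that class, otherwise the raw
+# node values are returned.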
+
+def fill_instance(xmlstr, element_name, return_type):
+ '''
+ Extracts the value of element_name from xmlstr and fills it into an instance of return_type.
+ Returns the instance, or None when the element is not present.
+ '''
+ xmldoc = minidom.parseString(xmlstr)
+ xmlelements = xmldoc.getElementsByTagName(capitalize_words(element_name))
+
+ if not xmlelements:
+ return None
+
+ xmlelement = xmlelements[0]
+
+ return _parse_response(xmlelement.toxml(), return_type)
+
+def fill_data(xmlstr, element_name, data_member):
+ xmldoc = minidom.parseString(xmlstr)
+ xmlelements = xmldoc.getElementsByTagName(capitalize_words(element_name))
+
+ if not xmlelements or not xmlelements[0].childNodes:
+ return None
+
+ value = xmlelements[0].firstChild.nodeValue
+
+ if data_member is None:
+ return value
+ elif isinstance(data_member, datetime):
+ return to_datetime(value)
+ elif isinstance(data_member, _Base64String):
+ return base64.b64decode(value)
+ elif type(data_member) is types.BooleanType:
+ return value.lower() != 'false'
+ else:
+ return type(data_member)(value)
+
+def _get_request_body(request_body):
+
+ if request_body is None:
+ return ''
+
+ if type(request_body) is types.InstanceType or isinstance(request_body, WindowsAzureData):
+ request_body = '' + convert_class_to_xml(request_body)
+
+ if not request_body.strip().startswith('<'):
+ return request_body
+
+ updated_str = datetime.utcnow().isoformat()
+ if datetime.utcnow().utcoffset() is None:
+ updated_str += '+00:00'
+
+ if request_body:
+ request_body = normalize_xml(request_body).strip()
+
+ return request_body
+
+def _get_response_header(service_instance):
+ return_obj = ResponseHeader()
+ if service_instance.respheader:
+ for name, value in service_instance.respheader:
+ setattr(return_obj, to_legalname(name), value)
+ return_obj.status = service_instance.status
+ return_obj.message = service_instance.message
+ return return_obj
+
+def _parse_response(respbody, return_type):
+ '''
+ Parses the XML response body and fills the data into a new instance of return_type.
+ '''
+ respbody = normalize_xml(respbody)
+
+ return_obj = return_type()
+ for name, value in vars(return_obj).iteritems():
+ if isinstance(value, list):
+ setattr(return_obj, name, fill_list(respbody, name, return_obj.__module__))
+ elif type(value) is types.InstanceType or isinstance(value, WindowsAzureData):
+ setattr(return_obj, name, fill_instance(respbody, name, value.__class__))
+ else:
+ value = fill_data(respbody, name, value)
+ if value is not None:
+ setattr(return_obj, name, value)
+
+ return return_obj
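+
+# A sketch of the convention, using a hypothetical return type:
+#   class Queue(WindowsAzureData):
+#       def __init__(self):
+#           self.message_count = 0
+#   _parse_response('<Queue><MessageCount>4</MessageCount></Queue>', Queue)
+# yields a Queue instance with message_count == 4: each attribute name is mapped to an
+# element via capitalize_words and the element text is converted back to the attribute's type.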
+
+def _update_request_uri_query(request, use_local_storage=False):
+ if '?' in request.uri:
+ pos = request.uri.find('?')
+ query_string = request.uri[pos+1:]
+ request.uri = request.uri[:pos]
+ if query_string:
+ query_params = query_string.split('&')
+ for query in query_params:
+ if '=' in query:
+ pos = query.find('=')
+ name = query[:pos]
+ value = query[pos+1:]
+ request.query.append((name, value))
+
+ request.uri = urllib2.quote(request.uri, '/()$=\',')
+ if request.query:
+ request.uri += '?'
+ for name, value in request.query:
+ if value is not None:
+ request.uri += name + '=' + urllib2.quote(value, '/()$=\',') + '&'
+ request.uri = request.uri[:-1]
+ if use_local_storage:
+ request.uri = '/' + DEV_ACCOUNT_NAME + request.uri
+ return request.uri, request.query
+ else:
+ if use_local_storage:
+ request.uri = '/' + DEV_ACCOUNT_NAME + request.uri
+ return request.uri, request.query
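+
+# For example, a request with uri '/myqueue/messages?numofmessages=5&timeout=30' is split into
+# ('/myqueue/messages?numofmessages=5&timeout=30', [('numofmessages', '5'), ('timeout', '30')]);
+# with use_local_storage=True the uri is additionally prefixed with '/devstoreaccount1'.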
+
+def _dont_fail_on_exist(error):
+ if error.message.lower() == 'conflict':
+ return False
+ else:
+ raise error
+
+def _dont_fail_not_exist(error):
+ if error.message.lower() == 'not found':
+ return False
+ else:
+ raise error
+
+def _parse_response_for_dict(service_instance):
+ http_headers = ['server', 'date', 'location', 'host',
+ 'via', 'proxy-connection', 'x-ms-version', 'connection',
+ 'content-length', 'x-ms-request-id']
+ if service_instance.respheader:
+ return_dict = {}
+ for name, value in service_instance.respheader:
+ if not name.lower() in http_headers:
+ return_dict[name] = value
+ return return_dict
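+
+# For example, with respheader [('x-ms-meta-category', 'docs'), ('Date', '...'), ('ETag', '"0x1"')]
+# this returns {'x-ms-meta-category': 'docs', 'ETag': '"0x1"'}; the transport headers listed in
+# http_headers are filtered out.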
+
+def _parse_response_for_dict_prefix(service_instance, prefix):
+ return_dict = {}
+ orig_dict = _parse_response_for_dict(service_instance)
+ if orig_dict:
+ for name, value in orig_dict.iteritems():
+ for prefix_value in prefix:
+ if name.lower().startswith(prefix_value.lower()):
+ return_dict[name] = value
+ break
+ return return_dict
+ else:
+ return None
+
+def _parse_response_for_dict_filter(service_instance, filter):
+ return_dict = {}
+ orig_dict = _parse_response_for_dict(service_instance)
+ if orig_dict:
+ for name, value in orig_dict.iteritems():
+ if name.lower() in filter:
+ return_dict[name] = value
+ return return_dict
+ else:
+ return None
+
+def _parse_response_for_dict_special(service_instance, prefix, filter):
+ return_dict = {}
+ orig_dict = _parse_response_for_dict(service_instance)
+ if orig_dict:
+ for name, value in orig_dict.iteritems():
+ if name.lower() in filter:
+ return_dict[name] = value
+ else:
+ for prefix_value in prefix:
+ if name.lower().startswith(prefix_value.lower()):
+ return_dict[name] = value
+ break
+ return return_dict
+ else:
+ return None
+
+def get_host(service_type, account_name, use_local_storage=False):
+ if use_local_storage:
+ if service_type == BLOB_SERVICE:
+ return DEV_BLOB_HOST
+ elif service_type == QUEUE_SERVICE:
+ return DEV_QUEUE_HOST
+ elif service_type == TABLE_SERVICE:
+ return DEV_TABLE_HOST
+ elif service_type == SERVICE_BUS_SERVICE:
+ return account_name + SERVICE_BUS_HOST_BASE
+ else:
+ if service_type == BLOB_SERVICE:
+ return account_name + BLOB_SERVICE_HOST_BASE
+ elif service_type == QUEUE_SERVICE:
+ return account_name + QUEUE_SERVICE_HOST_BASE
+ elif service_type == TABLE_SERVICE:
+ return account_name + TABLE_SERVICE_HOST_BASE
+ else:
+ return account_name + SERVICE_BUS_HOST_BASE
+
diff --git a/src/windowsazure/http/__init__.py b/src/windowsazure/http/__init__.py
new file mode 100644
index 000000000000..125039ae319e
--- /dev/null
+++ b/src/windowsazure/http/__init__.py
@@ -0,0 +1,12 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
\ No newline at end of file
diff --git a/src/windowsazure/http/batchclient.py b/src/windowsazure/http/batchclient.py
new file mode 100644
index 000000000000..3b7dce1870cc
--- /dev/null
+++ b/src/windowsazure/http/batchclient.py
@@ -0,0 +1,163 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import urllib2
+from windowsazure.http.httpclient import _HTTPClient
+from windowsazure import _Request, _update_request_uri_query, WindowsAzureError, HTTPError
+from windowsazure.storage import _update_storage_table_header
+
+class _BatchClient(_HTTPClient):
+
+ def __init__(self, service_instance, account_key, account_name, x_ms_version=None, protocol='http'):
+ _HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, x_ms_version=x_ms_version, protocol=protocol)
+ self.is_batch = False
+ self.batch_requests = []
+ self.batch_table = ''
+ self.batch_partition_key = ''
+ self.batch_row_keys = []
+
+ def get_request_table(self, request):
+ if '(' in request.uri:
+ pos = request.uri.find('(')
+ return request.uri[1:pos]
+ else:
+ return request.uri[1:]
+
+ def get_request_partition_key(self, request):
+ if request.method == 'POST':
+ pos1 = request.body.find('')
+ pos2 = request.body.find('')
+ if pos1 == -1 or pos2 == -1:
+ raise WindowsAzureError('Cannot find partition key in request.')
+ return request.body[pos1 + len(''):pos2]
+ else:
+ uri = urllib2.unquote(request.uri)
+ pos1 = uri.find('PartitionKey=\'')
+ pos2 = uri.find('\',', pos1)
+ if pos1 == -1 or pos2 == -1:
+ raise WindowsAzureError('Cannot find partition key in request.')
+ return uri[pos1 + len('PartitionKey=\''):pos2]
+
+ def get_request_row_key(self, request):
+ if request.method == 'POST':
+ pos1 = request.body.find('')
+ pos2 = request.body.find('')
+ if pos1 == -1 or pos2 == -1:
+ raise WindowsAzureError('Cannot find row key in request.')
+ return request.body[pos1 + len(''):pos2]
+ else:
+ uri = urllib2.unquote(request.uri)
+ pos1 = uri.find('RowKey=\'')
+ pos2 = uri.find('\')', pos1)
+ if pos1 == -1 or pos2 == -1:
+ raise WindowsAzureError('Cannot find row key in request.')
+ row_key = uri[pos1 + len('RowKey=\''):pos2]
+ return row_key
+
+ def validate_request_table(self, request):
+ if self.batch_table:
+ if self.get_request_table(request) != self.batch_table:
+ raise WindowsAzureError('Table should be the same in a batch operation')
+ else:
+ self.batch_table = self.get_request_table(request)
+
+ def validate_request_partition_key(self, request):
+ if self.batch_partition_key:
+ if self.get_request_partition_key(request) != self.batch_partition_key:
+ raise WindowsAzureError('Partition Key should be the same in a batch operation')
+ else:
+ self.batch_partition_key = self.get_request_partition_key(request)
+
+ def validate_request_row_key(self, request):
+ if self.batch_row_keys:
+ if self.get_request_row_key(request) in self.batch_row_keys:
+ raise WindowsAzureError('Row Key should be unique in a batch operation')
+ else:
+ self.batch_row_keys.append(self.get_request_row_key(request))
+
+ def begin_batch(self):
+ self.is_batch = True
+ self.batch_table = ''
+ self.batch_partition_key = ''
+ self.batch_row_keys = []
+ self.batch_requests = []
+
+ def insert_request_to_batch(self, request):
+ self.validate_request_table(request)
+ self.validate_request_partition_key(request)
+ self.validate_request_row_key(request)
+ self.batch_requests.append(request)
+
+ def commit_batch(self):
+ if self.is_batch:
+ self.is_batch = False
+ resp = self.commit_batch_requests()
+ return resp
+
+ def commit_batch_requests(self):
+ batch_boundary = 'batch_a2e9d677-b28b-435e-a89e-87e6a768a431'
+ changeset_boundary = 'changeset_8128b620-b4bb-458c-a177-0959fb14c977'
+ if self.batch_requests:
+ request = _Request()
+ request.method = 'POST'
+ request.host = self.batch_requests[0].host
+ request.uri = '/$batch'
+ request.header = [('Content-Type', 'multipart/mixed; boundary=' + batch_boundary),
+ ('Accept', 'application/atom+xml,application/xml'),
+ ('Accept-Charset', 'UTF-8')]
+
+ request.body = '--' + batch_boundary + '\n'
+ request.body += 'Content-Type: multipart/mixed; boundary=' + changeset_boundary + '\n\n'
+
+ content_id = 1
+ for batch_request in self.batch_requests:
+ request.body += '--' + changeset_boundary + '\n'
+ request.body += 'Content-Type: application/http\n'
+ request.body += 'Content-Transfer-Encoding: binary\n\n'
+
+ request.body += batch_request.method + ' http://' + batch_request.host + batch_request.uri + ' HTTP/1.1\n'
+ request.body += 'Content-ID: ' + str(content_id) + '\n'
+ content_id += 1
+
+ if not batch_request.method == 'DELETE':
+ request.body += 'Content-Type: application/atom+xml;type=entry\n'
+ request.body += 'Content-Length: ' + str(len(batch_request.body)) + '\n\n'
+ request.body += batch_request.body + '\n'
+ else:
+ find_if_match = False
+ for name, value in batch_request.header:
+ if name == 'If-Match':
+ request.body += name + ': ' + value + '\n\n'
+ find_if_match = True
+ break
+ if not find_if_match:
+ request.body += 'If-Match: *\n\n'
+
+ request.body += '--' + changeset_boundary + '--' + '\n'
+ request.body += '--' + batch_boundary + '--'
+
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+
+
+ resp = self.perform_request(request)
+ pos1 = resp.find('HTTP/1.1 ') + len('HTTP/1.1 ')
+ pos2 = resp.find(' ', pos1)
+ status = resp[pos1:pos2]
+ if int(status) >= 300:
+ raise HTTPError(int(status), 'Batch commit failed', self.respheader, resp)
+ return resp
+
+ def cancel_batch(self):
+ self.is_batch = False
+
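+# A rough usage sketch (the table service front end that drives this client, and the entity
+# requests themselves, are assumed to be built elsewhere):
+#   batch_client.begin_batch()
+#   batch_client.insert_request_to_batch(insert_entity_request)   # validates table/partition/row keys
+#   batch_client.insert_request_to_batch(update_entity_request)
+#   batch_client.commit_batch()    # sends a single multipart POST to /$batch
+# or batch_client.cancel_batch() to discard the queued requests.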
+
\ No newline at end of file
diff --git a/src/windowsazure/http/httpclient.py b/src/windowsazure/http/httpclient.py
new file mode 100644
index 000000000000..737b35073a5c
--- /dev/null
+++ b/src/windowsazure/http/httpclient.py
@@ -0,0 +1,85 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import os
+import types
+import base64
+import datetime
+import time
+import hashlib
+import hmac
+import urllib2
+import httplib
+import ast
+import sys
+from xml.dom import minidom
+
+from windowsazure import HTTPError
+
+class _HTTPClient:
+ def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, x_ms_version=None, protocol='https'):
+ self.service_instance = service_instance
+ self.status = None
+ self.respheader = None
+ self.message = None
+ self.cert_file = cert_file
+ self.account_name = account_name
+ self.account_key = account_key
+ self.service_namespace = service_namespace
+ self.issuer = issuer
+ self.x_ms_version = x_ms_version
+ self.protocol = protocol
+
+ def get_connection(self, request):
+ if sys.platform.lower().startswith('win'):
+ import windowsazure.http.winhttp
+ _connection = windowsazure.http.winhttp._HTTPConnection(request.host, cert_file=self.cert_file, protocol=self.protocol)
+ elif self.protocol == 'http':
+ _connection = httplib.HTTPConnection(request.host)
+ else:
+ _connection = httplib.HTTPSConnection(request.host, cert_file=self.cert_file)
+ return _connection
+
+ def send_request_headers(self, connection, request_headers):
+ for name, value in request_headers:
+ if value:
+ connection.putheader(name, value)
+ connection.endheaders()
+
+ def send_request_body(self, connection, request_body):
+ if request_body:
+ connection.send(request_body)
+ elif (not isinstance(connection, httplib.HTTPSConnection) and
+ not isinstance(connection, httplib.HTTPConnection)):
+ connection.send(None)
+
+ def perform_request(self, request):
+ """Send requst to server"""
+ connection = self.get_connection(request)
+ connection.putrequest(request.method, request.uri)
+ self.send_request_headers(connection, request.header)
+ self.send_request_body(connection, request.body)
+
+ resp = connection.getresponse()
+ self.status = int(resp.status)
+ self.message = resp.reason
+ self.respheader = resp.getheaders()
+ respbody = None
+ if resp.length is None:
+ respbody = resp.read()
+ elif resp.length > 0:
+ respbody = resp.read(resp.length)
+
+ if self.status >= 300:
+ raise HTTPError(self.status, self.message, self.respheader, respbody)
+
+ return respbody
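+
+# A minimal usage sketch (request details are illustrative; authentication headers are built
+# by the individual services and are not shown here; _Request comes from the windowsazure
+# package root):
+#   client = _HTTPClient(service_instance, account_name='myaccount', account_key='...')
+#   request = _Request()
+#   request.method = 'GET'
+#   request.host = 'myaccount.blob.core.windows.net'
+#   request.uri = '/?comp=list'
+#   respbody = client.perform_request(request)   # raises HTTPError when status >= 300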
diff --git a/src/windowsazure/http/winhttp.py b/src/windowsazure/http/winhttp.py
new file mode 100644
index 000000000000..1d40e0bf8adf
--- /dev/null
+++ b/src/windowsazure/http/winhttp.py
@@ -0,0 +1,264 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+from ctypes import c_void_p, c_long, c_ulong, c_longlong, c_ulonglong, c_short, c_ushort, c_wchar_p, c_byte
+from ctypes import byref, Structure, Union, POINTER, WINFUNCTYPE, HRESULT, oledll, WinDLL, cast, create_string_buffer
+import ctypes
+import urllib2
+
+VT_EMPTY = 0
+VT_NULL = 1
+VT_I2 = 2
+VT_I4 = 3
+VT_BSTR = 8
+VT_BOOL = 11
+VT_I1 = 16
+VT_UI1 = 17
+VT_UI2 = 18
+VT_UI4 = 19
+VT_I8 = 20
+VT_UI8 = 21
+VT_ARRAY = 8192
+
+HTTPREQUEST_PROXY_SETTING = c_long
+HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
+
+_ole32 = oledll.ole32
+_oleaut32 = WinDLL('oleaut32')
+_CLSIDFromString = _ole32.CLSIDFromString
+_CoInitialize = _ole32.CoInitialize
+_CoCreateInstance = _ole32.CoCreateInstance
+_SysAllocString = _oleaut32.SysAllocString
+_SysFreeString = _oleaut32.SysFreeString
+_SafeArrayDestroy = _oleaut32.SafeArrayDestroy
+_CoTaskMemAlloc = _ole32.CoTaskMemAlloc
+
+class BSTR(c_wchar_p):
+ def __init__(self, value):
+ super(BSTR, self).__init__(_SysAllocString(value))
+
+ def __del__(self):
+ _SysFreeString(self)
+
+class _tagSAFEARRAY(Structure):
+ class _tagSAFEARRAYBOUND(Structure):
+ _fields_ = [('c_elements', c_ulong), ('l_lbound', c_long)]
+
+ _fields_ = [('c_dims', c_ushort),
+ ('f_features', c_ushort),
+ ('cb_elements', c_ulong),
+ ('c_locks', c_ulong),
+ ('pvdata', c_void_p),
+ ('rgsabound', _tagSAFEARRAYBOUND*1)]
+
+ def __del__(self):
+ _SafeArrayDestroy(self.pvdata)
+ pass
+
+class VARIANT(Structure):
+ class _tagData(Union):
+ class _tagRecord(Structure):
+ _fields_= [('pvoid', c_void_p), ('precord', c_void_p)]
+
+ _fields_ = [('llval', c_longlong),
+ ('ullval', c_ulonglong),
+ ('lval', c_long),
+ ('ulval', c_ulong),
+ ('ival', c_short),
+ ('boolval', c_ushort),
+ ('bstrval', BSTR),
+ ('parray', POINTER(_tagSAFEARRAY)),
+ ('record', _tagRecord)]
+
+ _fields_ = [('vt', c_ushort),
+ ('wReserved1', c_ushort),
+ ('wReserved2', c_ushort),
+ ('wReserved3', c_ushort),
+ ('vdata', _tagData)]
+
+class GUID(Structure):
+ """Represents vector data."""
+ _fields_ = [("data1", c_ulong),
+ ("data2", c_ushort),
+ ("data3", c_ushort),
+ ("data4", c_byte*8)]
+
+ def __init__(self, name=None):
+ if name is not None:
+ _CLSIDFromString(unicode(name), byref(self))
+
+
+class _WinHttpRequest(c_void_p):
+ _SetProxy = WINFUNCTYPE(HRESULT, HTTPREQUEST_PROXY_SETTING, VARIANT, VARIANT)(7, 'SetProxy')
+ _SetCredentials = WINFUNCTYPE(HRESULT, BSTR, BSTR, HTTPREQUEST_SETCREDENTIALS_FLAGS)(8, 'SetCredentials')
+ _Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT)(9, 'Open')
+ _SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR)(10, 'SetRequestHeader')
+ _GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p))(11, 'GetResponseHeader')
+ _GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(12, 'GetAllResponseHeaders')
+ _Send = WINFUNCTYPE(HRESULT, VARIANT)(13, 'Send')
+ _Status = WINFUNCTYPE(HRESULT, POINTER(c_long))(14, 'Status')
+ _StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(15, 'StatusText')
+ _ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(16, 'ResponseText')
+ _ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(17, 'ResponseBody')
+ _ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(18, 'ResponseStream')
+ _WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort))(21, 'WaitForResponse')
+ _Abort = WINFUNCTYPE(HRESULT)(22, 'Abort')
+ _SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long)(23, 'SetTimeouts')
+ _SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR)(24, 'SetClientCertificate')
+
+ def open(self, method, url):
+ flag = VARIANT()
+ flag.vt = VT_BOOL
+ flag.vdata.boolval = 0
+
+ _method = BSTR(method)
+ _url = BSTR(url)
+ _WinHttpRequest._Open(self, _method, _url, flag)
+
+ def set_request_header(self, name, value):
+ _name = BSTR(name)
+ _value = BSTR(value)
+ _WinHttpRequest._SetRequestHeader(self, _name, _value)
+
+ def get_all_response_headers(self):
+ bstr_headers = c_void_p()
+ _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))
+ bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)
+ headers = bstr_headers.value
+ _SysFreeString(bstr_headers)
+ return headers
+
+ def send(self, request = None):
+ if request is None:
+ var_empty = VARIANT()
+ var_empty.vt = VT_EMPTY
+ var_empty.vdata.llval = 0
+ _WinHttpRequest._Send(self, var_empty)
+ else:
+ _request = VARIANT()
+ _request.vt = VT_ARRAY | VT_UI1
+ safearray = _tagSAFEARRAY()
+ safearray.c_dims = 1
+ safearray.cb_elements = 1
+ safearray.c_locks = 0
+ safearray.f_features = 128
+ safearray.rgsabound[0].c_elements = len(request)
+ safearray.rgsabound[0].l_lbound = 0
+ safearray.pvdata = cast(_CoTaskMemAlloc(len(request)), c_void_p)
+ ctypes.memmove(safearray.pvdata, request, len(request))
+ _request.vdata.parray = cast(byref(safearray), POINTER(_tagSAFEARRAY))
+ _WinHttpRequest._Send(self, _request)
+
+ def status(self):
+ status = c_long()
+ _WinHttpRequest._Status(self, byref(status))
+ return int(status.value)
+
+ def status_text(self):
+ bstr_status_text = c_void_p()
+ _WinHttpRequest._StatusText(self, byref(bstr_status_text))
+ bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)
+ status_text = bstr_status_text.value
+ _SysFreeString(bstr_status_text)
+ return status_text
+
+ def response_text(self):
+ bstr_resptext = c_void_p()
+ _WinHttpRequest._ResponseText(self, byref(bstr_resptext))
+ bstr_resptext = ctypes.cast(bstr_resptext, c_wchar_p)
+ resptext = bstr_resptext.value
+ _SysFreeString(bstr_resptext)
+ return resptext
+
+ def response_body(self):
+ var_respbody = VARIANT()
+ _WinHttpRequest._ResponseBody(self, byref(var_respbody))
+ if var_respbody.vt == VT_ARRAY | VT_UI1:
+ safearray = var_respbody.vdata.parray.contents
+ respbody = ctypes.string_at(safearray.pvdata, safearray.rgsabound[0].c_elements)
+
+ if respbody[3:].startswith('= 200 and int(resp.status) < 300:
+ if resp.length:
+ token = resp.read(resp.length)
+ else:
+ raise HTTPError(resp.status, resp.reason, resp.getheaders(), None)
+ else:
+ raise HTTPError(resp.status, resp.reason, resp.getheaders(), None)
+
+ token = urllib2.unquote(token[token.find('=')+1:token.rfind('&')])
+ _tokens[wrap_scope] = token
+
+ return token
+
+def _create_message(service_instance, respbody):
+ custom_properties = {}
+ broker_properties = None
+ message_type = None
+ message_location = None
+ for name, value in service_instance.respheader:
+ if name.lower() == 'brokerproperties':
+ broker_properties = ast.literal_eval(value)
+ elif name.lower() == 'content-type':
+ message_type = value
+ elif name.lower() == 'location':
+ message_location = value
+ elif name.lower() not in ['content-type', 'brokerproperties', 'transfer-encoding', 'server', 'location', 'date']:
+ if '"' in value:
+ custom_properties[name] = value[1:-1]
+ else:
+ custom_properties[name] = value
+ if message_type is None:
+ message = Message(respbody, service_instance, message_location, custom_properties, broker_properties)
+ else:
+ message = Message(respbody, service_instance, message_location, custom_properties, message_type, broker_properties)
+ return message
+
+#convert functions
+def convert_xml_to_rule(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr, to_lower=True)
+ xmldoc = minidom.parseString(xmlstr)
+ rule = Rule()
+
+ xml_filters = xmldoc.getElementsByTagName('filter')
+ if xml_filters:
+ xml_filter = xml_filters[0]
+ filter_type = xml_filter.getAttribute('type')
+ setattr(rule, 'filter_type', str(filter_type))
+ if xml_filter.childNodes:
+ filter_expression = xml_filter.childNodes[0].firstChild
+ if filter_expression:
+ setattr(rule, 'filter_expression', filter_expression.nodeValue)
+
+ xml_actions = xmldoc.getElementsByTagName('action')
+ if xml_actions:
+ xml_action = xml_actions[0]
+ action_type = xml_action.getAttribute('type')
+ setattr(rule, 'action_type', str(action_type))
+ if xml_action.childNodes:
+ action_expression = xml_action.childNodes[0].firstChild
+ if action_expression:
+ setattr(rule, 'filter_expression', action_expression.nodeValue)
+
+ for name, value in get_entry_properties(xmlstr, ['id', 'updated', 'name']).iteritems():
+ setattr(rule, name, value)
+
+ return rule
+
+def convert_xml_to_queue(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr, to_lower=True)
+ xmldoc = minidom.parseString(xmlstr)
+ queue = Queue()
+
+ invalid_queue = True
+ for attr_name, attr_value in vars(queue).iteritems():
+ tag_name = attr_name.replace('_', '')
+ xml_attrs = xmldoc.getElementsByTagName(tag_name)
+ if xml_attrs:
+ xml_attr = xml_attrs[0]
+ if xml_attr.firstChild:
+ setattr(queue, attr_name, xml_attr.firstChild.nodeValue)
+ invalid_queue = False
+
+ if invalid_queue:
+ raise WindowsAzureError('Queue not found')
+
+ for name, value in get_entry_properties(xmlstr, ['id', 'updated', 'name']).iteritems():
+ setattr(queue, name, value)
+
+ return queue
+
+def convert_xml_to_topic(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr, to_lower=True)
+ xmldoc = minidom.parseString(xmlstr)
+ topic = Topic()
+
+ invalid_topic = True
+ for attr_name, attr_value in vars(topic).iteritems():
+ tag_name = attr_name.replace('_', '')
+ xml_attrs = xmldoc.getElementsByTagName(tag_name)
+ if xml_attrs:
+ xml_attr = xml_attrs[0]
+ if xml_attr.firstChild:
+ setattr(topic, attr_name, xml_attr.firstChild.nodeValue)
+ invalid_topic = False
+
+ if invalid_topic:
+ raise WindowsAzureError('Topic not found')
+
+ for name, value in get_entry_properties(xmlstr, ['id', 'updated', 'name']).iteritems():
+ setattr(topic, name, value)
+ return topic
+
+def convert_xml_to_subscription(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr, to_lower=True)
+ xmldoc = minidom.parseString(xmlstr)
+ subscription = Subscription()
+ for attr_name, attr_value in vars(subscription).iteritems():
+ tag_name = attr_name.replace('_', '')
+ xml_attrs = xmldoc.getElementsByTagName(tag_name)
+ if xml_attrs:
+ xml_attr = xml_attrs[0]
+ if xml_attr.firstChild:
+ setattr(subscription, attr_name, xml_attr.firstChild.nodeValue)
+ for name, value in get_entry_properties(xmlstr, ['id', 'updated', 'name']).iteritems():
+ setattr(subscription, name, value)
+ return subscription
+
+def convert_subscription_to_xml(subscription):
+ subscription_body = ''
+ if subscription:
+ if subscription.lock_duration:
+ subscription_body += ''.join(['', subscription.lock_duration, ''])
+ if subscription.requires_session:
+ subscription_body += ''.join(['', subscription.requires_session, ''])
+ if subscription.default_message_time_to_live:
+ subscription_body += ''.join(['', subscription.default_message_time_to_live, ''])
+ if subscription.dead_lettering_on_message_expiration:
+ subscription_body += ''.join(['', subscription.dead_lettering_on_message_expiration, ''])
+ if subscription.dead_lettering_on_filter_evaluation_exceptions:
+ subscription_body += ''.join(['', subscription.dead_lettering_on_filter_evaluation_exceptions, ''])
+ if subscription.enable_batched_operations:
+ subscription_body += ''.join(['', subscription.enable_batched_operations, ''])
+ if subscription.max_delivery_count:
+ subscription_body += ''.join(['', subscription.max_delivery_count, ''])
+ if subscription.message_count:
+ subscription_body += ''.join(['', subscription.message_count, ''])
+
+ subscription_body += ''
+ return create_entry(subscription_body)
+
+def convert_rule_to_xml(rule):
+ rule_body = ''
+ if rule:
+ if rule.filter_type:
+ rule_body += ''.join([''])
+ if rule.filter_type == 'CorrelationFilter':
+ rule_body += ''.join(['', html_encode(rule.filter_expression), ''])
+ else:
+ rule_body += ''.join(['', html_encode(rule.filter_expression), ''])
+ rule_body += '20'
+ rule_body += ''
+ if rule.action_type:
+ rule_body += ''.join([''])
+ if rule.action_type == 'SqlFilterAction':
+ rule_body += ''.join(['', html_encode(rule.action_expression), ''])
+ rule_body += ''
+ rule_body += ''
+
+ return create_entry(rule_body)
+
+def convert_topic_to_xml(topic):
+ topic_body = ''
+ if topic:
+ if topic.default_message_time_to_live:
+ topic_body += ''.join(['', topic.default_message_time_to_live, ''])
+ if topic.max_size_in_mega_bytes:
+ topic_body += ''.join(['', topic.max_size_in_mega_bytes, ''])
+ if topic.requires_duplicate_detection:
+ topic_body += ''.join(['', topic.requires_duplicate_detection, ''])
+ if topic.duplicate_detection_history_time_window:
+ topic_body += ''.join(['', topic.duplicate_detection_history_time_window, ''])
+ if topic.enable_batched_operations:
+ topic_body += ''.join(['', topic.enable_batched_operations, ''])
+ if topic.size_in_bytes:
+ topic_body += ''.join(['', topic.size_in_bytes, ''])
+ topic_body += ''
+
+ return create_entry(topic_body)
+
+def convert_queue_to_xml(queue):
+ queue_body = ''
+ if queue:
+ if queue.lock_duration:
+ queue_body += ''.join(['', queue.lock_duration, ''])
+ if queue.max_size_in_megabytes:
+ queue_body += ''.join(['', queue.max_size_in_megabytes, ''])
+ if queue.requires_duplicate_detection:
+ queue_body += ''.join(['', queue.requires_duplicate_detection, ''])
+ if queue.requires_session:
+ queue_body += ''.join(['', queue.requires_session, ''])
+ if queue.default_message_time_to_live:
+ queue_body += ''.join(['', queue.default_message_time_to_live, ''])
+ if queue.enable_dead_lettering_on_message_expiration:
+ queue_body += ''.join(['', queue.enable_dead_lettering_on_message_expiration, ''])
+ if queue.duplicate_detection_history_time_window:
+ queue_body += ''.join(['', queue.duplicate_detection_history_time_window, ''])
+ if queue.max_delivery_count:
+ queue_body += ''.join(['', queue.max_delivery_count, ''])
+ if queue.enable_batched_operations:
+ queue_body += ''.join(['', queue.enable_batched_operations, ''])
+ if queue.size_in_bytes:
+ queue_body += ''.join(['', queue.size_in_bytes, ''])
+ if queue.message_count:
+ queue_body += ''.join(['', queue.message_count, ''])
+
+ queue_body += ''
+ return create_entry(queue_body)
+
+def _service_bus_error_handler(http_error):
+ if http_error.status == 409:
+ raise WindowsAzureError('Conflict')
+ elif http_error.status == 404:
+ raise WindowsAzureError('Not Found')
+ else:
+ raise WindowsAzureError('Unknown Error')
+
+from windowsazure.servicebus.servicebusservice import ServiceBusService
+
diff --git a/src/windowsazure/servicebus/servicebusservice.py b/src/windowsazure/servicebus/servicebusservice.py
new file mode 100644
index 000000000000..0d1fce6d0b4b
--- /dev/null
+++ b/src/windowsazure/servicebus/servicebusservice.py
@@ -0,0 +1,686 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from windowsazure.http.httpclient import _HTTPClient
+from windowsazure.servicebus import (_update_service_bus_header, _create_message,
+ convert_topic_to_xml, convert_xml_to_topic,
+ convert_queue_to_xml, convert_xml_to_queue,
+ convert_subscription_to_xml, convert_xml_to_subscription,
+ convert_rule_to_xml, convert_xml_to_rule,
+ _service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE,
+ AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)
+from windowsazure import (validate_length, validate_values, validate_not_none, Feed, _Request,
+ convert_xml_to_feeds, to_right_type,
+ _get_request_body, _update_request_uri_query, get_host,
+ _dont_fail_on_exist, _dont_fail_not_exist, HTTPError,
+ WindowsAzureError, _parse_response, _Request, convert_class_to_xml,
+ _parse_response_for_dict, _parse_response_for_dict_prefix,
+ _parse_response_for_dict_filter, _parse_response_for_dict_special,
+ BLOB_SERVICE, QUEUE_SERVICE, TABLE_SERVICE, SERVICE_BUS_SERVICE)
+
+class ServiceBusService:
+
+ def create_queue(self, queue_name, queue=None, fail_on_exist=False):
+ '''
+ Creates a new queue. Once created, this queue's resource manifest is immutable.
+
+ queue_name: the name of the queue.
+ queue: the Queue object to create.
+ fail_on_exist: specify whether to throw an exception when the queue already exists.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + ''
+ request.body = _get_request_body(convert_queue_to_xml(queue))
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def delete_queue(self, queue_name, fail_not_exist=False):
+ '''
+ Deletes an existing queue. This operation will also remove all associated state
+ including messages in the queue.
+
+ queue_name: name of the queue.
+ fail_not_exist: specify whether to throw an exception when the queue doesn't exist.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_queue(self, queue_name):
+ '''
+ Retrieves an existing queue.
+
+ queue_name: name of the queue.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_queue(respbody)
+
+ def list_queues(self):
+ '''
+ Enumerates the queues in the service namespace.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/$Resources/Queues'
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_queue)
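+
+ # A rough usage sketch (how the client is constructed is not shown here; it is assumed to
+ # carry the service_namespace, account_key and issuer attributes used by the methods above):
+ #   sbs = ServiceBusService()
+ #   sbs.create_queue('taskqueue')          # returns True, or False if it already exists
+ #   queue = sbs.get_queue('taskqueue')
+ #   names = [q.name for q in sbs.list_queues()]
+ #   sbs.delete_queue('taskqueue', fail_not_exist=True)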
+
+ def create_topic(self, topic_name, topic=None, fail_on_exist=False):
+ '''
+ Creates a new topic. Once created, this topic's resource manifest is immutable.
+
+ topic_name: name of the topic.
+ topic: the Topic object to create.
+ fail_on_exist: specify whether to throw an exception when the topic already exists.
+ '''
+ validate_not_none('topic_name', topic_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + ''
+ request.body = _get_request_body(convert_topic_to_xml(topic))
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def delete_topic(self, topic_name, fail_not_exist=False):
+ '''
+ Deletes an existing topic. This operation will also remove all associated state
+ including associated subscriptions.
+
+ topic_name: name of the topic.
+ fail_not_exist: specify whether to throw an exception when the topic doesn't exist.
+ '''
+ validate_not_none('topic_name', topic_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_topic(self, topic_name):
+ '''
+ Retrieves the description for the specified topic.
+
+ topic_name: name of the topic.
+ '''
+ validate_not_none('topic_name', topic_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_topic(respbody)
+
+ def list_topics(self):
+ '''
+ Retrieves the topics in the service namespace.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/$Resources/Topics'
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_topic)
+
+ def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_on_exist=False):
+ '''
+ Creates a new rule. Once created, this rule's resource manifest is immutable.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+        rule_name: name of the rule.
+        rule: the Rule object to create.
+        fail_on_exist: specify whether to throw an exception when the rule exists.
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ validate_not_none('rule-name', rule_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/rules/' + to_right_type(rule_name) + ''
+ request.body = _get_request_body(convert_rule_to_xml(rule))
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def delete_rule(self, topic_name, subscription_name, rule_name, fail_not_exist=False):
+ '''
+ Deletes an existing rule.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+        rule_name: name of the rule. DEFAULT_RULE_NAME is '$Default'. Use
+            DEFAULT_RULE_NAME to delete the default rule for the subscription.
+        fail_not_exist: specify whether to throw an exception when the rule doesn't exist.
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ validate_not_none('rule-name', rule_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/rules/' + to_right_type(rule_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_rule(self, topic_name, subscription_name, rule_name):
+ '''
+ Retrieves the description for the specified rule.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+ rule_name: name of the rule
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ validate_not_none('rule-name', rule_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/rules/' + to_right_type(rule_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_rule(respbody)
+
+ def list_rules(self, topic_name, subscription_name):
+ '''
+ Retrieves the rules that exist under the specified subscription.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/rules/'
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_rule)
+
+ def create_subscription(self, topic_name, subscription_name, subscription=None, fail_on_exist=False):
+ '''
+ Creates a new subscription. Once created, this subscription resource manifest is
+ immutable.
+
+ topic_name: the name of the topic
+        subscription_name: the name of the subscription
+        subscription: the Subscription object to create.
+        fail_on_exist: specify whether to throw an exception when the subscription exists.
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + ''
+ request.body = _get_request_body(convert_subscription_to_xml(subscription))
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def delete_subscription(self, topic_name, subscription_name, fail_not_exist=False):
+ '''
+ Deletes an existing subscription.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+        fail_not_exist: specify whether to throw an exception when the subscription doesn't exist.
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_subscription(self, topic_name, subscription_name):
+ '''
+ Gets an existing subscription.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_subscription(respbody)
+
+ def list_subscriptions(self, topic_name):
+ '''
+ Retrieves the subscriptions in the specified topic.
+
+ topic_name: the name of the topic
+ '''
+ validate_not_none('topic-name', topic_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/'
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_subscription)
+
+ def send_topic_message(self, topic_name, message=None):
+ '''
+        Enqueues a message into the specified topic. The number of messages that may
+        be held in the topic is governed by the topic's maximum size (MaxTopicSizeInBytes).
+        If this message would cause the topic to exceed its quota, a quota-exceeded error
+        is returned and the message is rejected.
+
+ topic_name: name of the topic.
+ message: the Message object containing message body and properties.
+ '''
+ validate_not_none('topic-name', topic_name)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/messages'
+ request.header = message.add_headers(request)
+ request.body = _get_request_body(message.body)
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ def peek_lock_subscription_message(self, topic_name, subscription_name, timeout='60'):
+ '''
+        This operation is used to atomically retrieve and lock a message for processing.
+        The message is guaranteed not to be delivered to other receivers on the same
+        subscription during the lock duration specified in the topic description. Once
+        the lock expires, the message becomes available to other receivers again. To
+        complete processing of the message, the receiver should issue a delete command
+        with the lock ID received from this operation. To abandon processing and unlock
+        the message for other receivers, issue an unlock command, or simply let the lock
+        duration expire. See the usage sketch after delete_subscription_message below.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/messages/head'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return _create_message(self, respbody)
+
+ def unlock_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
+ '''
+ Unlock a message for processing by other receivers on a given subscription.
+ This operation deletes the lock object, causing the message to be unlocked.
+ A message must have first been locked by a receiver before this operation
+ is called.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+        sequence_number: The sequence number of the message to be unlocked as returned
+ in BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token: The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ validate_not_none('sequence-number', sequence_number)
+ validate_not_none('lock-token', lock_token)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/messages/' + to_right_type(sequence_number) + '/' + to_right_type(lock_token) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ def read_delete_subscription_message(self, topic_name, subscription_name, timeout='60'):
+ '''
+        Reads and deletes a message from a subscription as an atomic operation. This
+ operation should be used when a best-effort guarantee is sufficient for an
+ application; that is, using this operation it is possible for messages to
+ be lost if processing fails.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/messages/head'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return _create_message(self, respbody)
+
+ def delete_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
+ '''
+        Completes processing on a locked message and deletes it from the subscription.
+ This operation should only be called after processing a previously locked
+ message is successful to maintain At-Least-Once delivery assurances.
+
+ topic_name: the name of the topic
+ subscription_name: the name of the subscription
+        sequence_number: The sequence number of the message to be deleted as returned
+ in BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token: The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
+ '''
+ validate_not_none('topic-name', topic_name)
+ validate_not_none('subscription-name', subscription_name)
+ validate_not_none('sequence-number', sequence_number)
+ validate_not_none('lock-token', lock_token)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(topic_name) + '/subscriptions/' + to_right_type(subscription_name) + '/messages/' + to_right_type(sequence_number) + '/' + to_right_type(lock_token) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
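+    # Usage sketch for the peek-lock flow above (illustrative only, not part of
+    # the original SDK; it assumes this client class is instantiated as `sbs`,
+    # that a topic/subscription pair exists, and that the returned Message object
+    # exposes its broker properties under `broker_properties`):
+    #
+    #   msg = sbs.peek_lock_subscription_message('mytopic', 'mysub', timeout='30')
+    #   try:
+    #       handle(msg.body)                                  # hypothetical handler
+    #       sbs.delete_subscription_message('mytopic', 'mysub',
+    #           msg.broker_properties['SequenceNumber'],
+    #           msg.broker_properties['LockToken'])
+    #   except Exception:
+    #       sbs.unlock_subscription_message('mytopic', 'mysub',
+    #           msg.broker_properties['SequenceNumber'],
+    #           msg.broker_properties['LockToken'])
+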
+ def send_queue_message(self, queue_name, message=None):
+ '''
+        Sends a message into the specified queue. The number of messages that may be
+        held in the queue is governed by the queue's maximum size quota. If this message
+        would cause the queue to exceed its quota, a quota-exceeded error is returned
+        and the message is rejected.
+
+ queue_name: name of the queue
+ message: the Message object containing message body and properties.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + '/messages'
+ request.header = message.add_headers(request)
+ request.body = _get_request_body(message.body)
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ def peek_lock_queue_message(self, queue_name, timeout='60'):
+ '''
+        Atomically retrieves and locks a message from a queue for processing. The
+        message is guaranteed not to be delivered to other receivers during the lock
+        duration specified in the queue description. Once the lock expires, the message
+        becomes available to other receivers. To complete processing of the message,
+        the receiver should issue a delete command with the lock ID received from this
+        operation. To abandon processing and unlock the message for other receivers,
+        issue an unlock command, or let the lock duration expire.
+
+ queue_name: name of the queue
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/head'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return _create_message(self, respbody)
+
+ def unlock_queue_message(self, queue_name, sequence_number, lock_token):
+ '''
+        Unlocks a message for processing by other receivers on a given queue.
+ This operation deletes the lock object, causing the message to be unlocked.
+ A message must have first been locked by a receiver before this operation is
+ called.
+
+ queue_name: name of the queue
+        sequence_number: The sequence number of the message to be unlocked as returned
+ in BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token: The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
+ '''
+ validate_not_none('queue-name', queue_name)
+ validate_not_none('sequence-number', sequence_number)
+ validate_not_none('lock-token', lock_token)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/' + to_right_type(sequence_number) + '/' + to_right_type(lock_token) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ def read_delete_queue_message(self, queue_name, timeout='60'):
+ '''
+ Reads and deletes a message from a queue as an atomic operation. This operation
+ should be used when a best-effort guarantee is sufficient for an application;
+ that is, using this operation it is possible for messages to be lost if
+ processing fails.
+
+ queue_name: name of the queue
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/head'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+ return _create_message(self, respbody)
+
+ def delete_queue_message(self, queue_name, sequence_number, lock_token):
+ '''
+        Completes processing on a locked message and deletes it from the queue. This
+ operation should only be called after processing a previously locked message
+ is successful to maintain At-Least-Once delivery assurances.
+
+ queue_name: name of the queue
+        sequence_number: The sequence number of the message to be deleted as returned
+ in BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token: The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
+ '''
+ validate_not_none('queue-name', queue_name)
+        validate_not_none('sequence-number', sequence_number)
+ validate_not_none('lock-token', lock_token)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(SERVICE_BUS_SERVICE, self.service_namespace)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/' + to_right_type(sequence_number) + '/' + to_right_type(lock_token) + ''
+ request.uri, request.query = _update_request_uri_query(request)
+ request.header = _update_service_bus_header(request, self.account_key, self.issuer)
+ respbody = self._perform_request(request)
+
+
+    def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
+        '''
+        Receives a message from a queue. When peek_lock is True the message is
+        peek-locked for later completion or abandonment; otherwise it is read
+        and deleted in a single operation.
+        '''
+        if peek_lock:
+            return self.peek_lock_queue_message(queue_name, timeout)
+        else:
+            return self.read_delete_queue_message(queue_name, timeout)
+
+    def receive_subscription_message(self, topic_name, subscription_name, peek_lock=True, timeout=60):
+        '''
+        Receives a message from a subscription. When peek_lock is True the message
+        is peek-locked for later completion or abandonment; otherwise it is read
+        and deleted in a single operation.
+        '''
+        if peek_lock:
+            return self.peek_lock_subscription_message(topic_name, subscription_name, timeout)
+        else:
+            return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
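+
+    # Queue usage sketch (illustrative only, not part of the original SDK; it
+    # assumes this client class is instantiated as `sbs`, that a queue named
+    # 'taskqueue' exists, and that the Message class takes the body as its
+    # first constructor argument):
+    #
+    #   sbs.send_queue_message('taskqueue', Message('hello world'))
+    #   msg = sbs.receive_queue_message('taskqueue', peek_lock=False)  # read and delete
+    #   print msg.body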
+
+ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01'):
+ self.status = None
+ self.message = None
+ self.respheader = None
+ self.requestid = None
+ self.service_namespace = service_namespace
+ self.account_key = account_key
+ self.issuer = issuer
+        if not service_namespace:
+            if AZURE_SERVICEBUS_NAMESPACE in os.environ:
+                self.service_namespace = os.environ[AZURE_SERVICEBUS_NAMESPACE]
+        if not account_key:
+            if AZURE_SERVICEBUS_ACCESS_KEY in os.environ:
+                self.account_key = os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
+        if not issuer:
+            if AZURE_SERVICEBUS_ISSUER in os.environ:
+                self.issuer = os.environ[AZURE_SERVICEBUS_ISSUER]
+
+ if not self.service_namespace or not self.account_key or not self.issuer:
+            raise WindowsAzureError('You need to provide a service bus namespace, access key and issuer')
+
+ self.x_ms_version = x_ms_version
+ self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer, x_ms_version=self.x_ms_version)
+
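+    # Construction sketch for the `sbs` client used in the comments above
+    # (illustrative only, not part of the original SDK; the class name shown is
+    # hypothetical -- substitute whatever name this class is given):
+    #
+    #   sbs = ServiceBusClient(service_namespace='mynamespace',
+    #                          account_key='<your ACS key>',
+    #                          issuer='owner')
+    #
+    # Alternatively, omit the arguments and export the environment variables
+    # named by AZURE_SERVICEBUS_NAMESPACE, AZURE_SERVICEBUS_ACCESS_KEY and
+    # AZURE_SERVICEBUS_ISSUER before constructing the client.
+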
+ def _perform_request(self, request):
+ try:
+ resp = self._httpclient.perform_request(request)
+ self.status = self._httpclient.status
+ self.message = self._httpclient.message
+ self.respheader = self._httpclient.respheader
+ except HTTPError as e:
+ self.status = e.status
+ self.message = e.message
+ self.respheader = e.respheader
+ return _service_bus_error_handler(e)
+
+ if not resp:
+ return None
+ return resp
+
diff --git a/src/windowsazure/storage/__init__.py b/src/windowsazure/storage/__init__.py
new file mode 100644
index 000000000000..0fa829f1703c
--- /dev/null
+++ b/src/windowsazure/storage/__init__.py
@@ -0,0 +1,499 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import datetime
+import base64
+import hashlib
+import hmac
+import urllib2
+from xml.dom import minidom
+import types
+from datetime import datetime
+
+from windowsazure import (remove_xmltag_namespace, create_entry, normalize_xml,
+ get_entry_properties, html_encode, WindowsAzureError)
+
+
+X_MS_VERSION = '2011-08-18'
+
+from windowsazure import WindowsAzureData, DEV_ACCOUNT_NAME
+
+class EnumResultsBase:
+ def __init__(self):
+ self.prefix = ''
+ self.marker = ''
+ self.max_results = 0
+ self.next_marker = ''
+
+class ContainerEnumResults(EnumResultsBase):
+ def __init__(self):
+ EnumResultsBase.__init__(self)
+ self.containers = []
+ def __iter__(self):
+ return iter(self.containers)
+ def __len__(self):
+ return len(self.containers)
+ def __getitem__(self, index):
+ return self.containers[index]
+
+class Container(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.url = ''
+ self.properties = Properties()
+ self.metadata = Metadata()
+
+class Properties(WindowsAzureData):
+ def __init__(self):
+ self.last_modified = ''
+ self.etag = ''
+
+class Metadata(WindowsAzureData):
+ def __init__(self):
+ self.metadata_name = ''
+
+class RetentionPolicy(WindowsAzureData):
+ def __init__(self):
+ self.enabled = False
+ self.__dict__['days'] = None
+
+ def get_days(self):
+ return self.__dict__['days']
+
+ def set_days(self, value):
+ if value == '':
+ self.__dict__['days'] = 10
+ else:
+ self.__dict__['days'] = value
+
+ days = property(fget=get_days, fset=set_days)
+
+class Logging(WindowsAzureData):
+ def __init__(self):
+ self.version = '1.0'
+ self.delete = False
+ self.read = False
+ self.write = False
+ self.retention_policy = RetentionPolicy()
+
+class Metrics(WindowsAzureData):
+ def __init__(self):
+ self.version = '1.0'
+ self.enabled = False
+ self.include_apis = None
+ self.retention_policy = RetentionPolicy()
+
+class StorageServiceProperties(WindowsAzureData):
+ def __init__(self):
+ self.logging = Logging()
+ self.metrics = Metrics()
+
+class AccessPolicy(WindowsAzureData):
+ def __init__(self):
+ self.start = ''
+ self.expiry = ''
+ self.permission = ''
+
+class SignedIdentifier(WindowsAzureData):
+ def __init__(self):
+ self.id = ''
+ self.access_policy = AccessPolicy()
+
+class SignedIdentifiers(WindowsAzureData):
+ def __init__(self):
+ self.signed_identifiers = []
+ def __iter__(self):
+        return iter(self.signed_identifiers)
+
+class BlobEnumResults(EnumResultsBase):
+ def __init__(self):
+ EnumResultsBase.__init__(self)
+ self.blobs = []
+ def __iter__(self):
+ return iter(self.blobs)
+ def __len__(self):
+ return len(self.blobs)
+ def __getitem__(self, index):
+ return self.blobs[index]
+
+class Blob(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.snapshot = ''
+ self.url = ''
+ self.properties = BlobProperties()
+ self.metadata = Metadata()
+ self.blob_prefix = BlobPrefix()
+
+class BlobProperties(WindowsAzureData):
+ def __init__(self):
+ self.last_modified = ''
+ self.etag = ''
+ self.content_length = 0
+ self.content_type = ''
+ self.content_encoding = ''
+ self.content_language = ''
+ self.content_md5 = ''
+ self.xms_blob_sequence_number = 0
+ self.blob_type = ''
+ self.lease_status = ''
+
+class BlobPrefix(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+
+class BlobBlock(WindowsAzureData):
+ def __init__(self, id=None, size=None):
+ self.id = id
+ self.size = size
+
+class BlobBlockList(WindowsAzureData):
+ def __init__(self):
+ self.committed_blocks = []
+ self.uncommitted_blocks = []
+
+class BlockList(WindowsAzureData):
+ def __init__(self):
+ self.committed = []
+ self.uncommitted = []
+ self.latest = []
+
+class PageRange(WindowsAzureData):
+ def __init__(self):
+ self.start = 0
+ self.end = 0
+
+class PageList:
+ def __init__(self):
+ self.page_ranges = []
+ def __iter__(self):
+        return iter(self.page_ranges)
+
+class QueueEnumResults(EnumResultsBase):
+ def __init__(self):
+ EnumResultsBase.__init__(self)
+ self.queues = []
+ def __iter__(self):
+ return iter(self.queues)
+ def __len__(self):
+ return len(self.queues)
+ def __getitem__(self, index):
+ return self.queues[index]
+
+class Queue(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.url = ''
+ self.metadata = Metadata()
+
+class QueueMessageList:
+ def __init__(self):
+ self.queue_messages = []
+ def __iter__(self):
+ return iter(self.queue_messages)
+ def __len__(self):
+ return len(self.queue_messages)
+ def __getitem__(self, index):
+ return self.queue_messages[index]
+
+class QueueMessage(WindowsAzureData):
+ def __init__(self):
+ self.message_id = ''
+ self.insertion_time = ''
+ self.expiration_time = ''
+ self.pop_receipt = ''
+ self.time_next_visible = ''
+ self.dequeue_count = ''
+ self.message_text = ''
+
+class TableEnumResult(EnumResultsBase):
+    def __init__(self):
+ EnumResultsBase.__init__(self)
+ self.tables = []
+ def __iter__(self):
+ return iter(self.tables)
+ def __len__(self):
+ return len(self.tables)
+ def __getitem__(self, index):
+ return self.tables[index]
+
+class Entity(WindowsAzureData):
+ pass
+
+class EntityProperty(WindowsAzureData):
+ def __init__(self, type=None, value=None):
+ self.type = type
+ self.value = value
+
+class Table(WindowsAzureData):
+ pass
+
+def _update_storage_header(request):
+ if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
+ request.header.append(('Content-Length', str(len(request.body))))
+
+    #append additional headers based on the service
+ request.header.append(('x-ms-version', X_MS_VERSION))
+
+ #append x-ms-meta name, values to header
+ for name, value in request.header:
+ if 'x-ms-meta-name-values' in name and value:
+ for meta_name, meta_value in value.iteritems():
+ request.header.append(('x-ms-meta-' + meta_name, meta_value))
+ request.header.remove((name, value))
+ break
+ return request
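+
+# Illustrative behaviour of the metadata expansion above (a sketch, not part of
+# the original code): a header entry such as
+#
+#   ('x-ms-meta-name-values', {'Category': 'test', 'Owner': 'alice'})
+#
+# is removed and replaced by the individual headers
+#
+#   ('x-ms-meta-Category', 'test'), ('x-ms-meta-Owner', 'alice')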
+
+def _update_storage_blob_header(request, account_name, account_key):
+ request = _update_storage_header(request)
+ current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+ request.header.append(('x-ms-date', current_time))
+ request.header.append(('Content-Type', 'application/octet-stream Charset=UTF-8'))
+ request.header.append(('Authorization', _sign_storage_blob_request(request, account_name, account_key)))
+
+ return request.header
+
+def _update_storage_queue_header(request, account_name, account_key):
+ return _update_storage_blob_header(request, account_name, account_key)
+
+def _update_storage_table_header(request, account_name, account_key):
+ request = _update_storage_header(request)
+ for name, value in request.header:
+ if name.lower() == 'content-type':
+            break
+ else:
+ request.header.append(('Content-Type', 'application/atom+xml'))
+ request.header.append(('DataServiceVersion', '2.0;NetFx'))
+ request.header.append(('MaxDataServiceVersion', '2.0;NetFx'))
+ current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+ request.header.append(('x-ms-date', current_time))
+ request.header.append(('Date', current_time))
+ request.header.append(('Authorization', _sign_storage_table_request(request, account_name, account_key)))
+ return request.header
+
+def _sign_storage_blob_request(request, account_name, account_key):
+ uri_path = request.uri.split('?')[0]
+
+ #method to sign
+ string_to_sign = request.method + '\n'
+
+ #get headers to sign
+    headers_to_sign = ['content-encoding', 'content-language', 'content-length',
+                       'content-md5', 'content-type', 'date', 'if-modified-since',
+                       'if-match', 'if-none-match', 'if-unmodified-since', 'range']
+    for header in headers_to_sign:
+        for name, value in request.header:
+            if value and name.lower() == header:
+                string_to_sign += value + '\n'
+ break
+ else:
+ string_to_sign += '\n'
+
+ #get x-ms header to sign if it is not storage table
+ x_ms_headers = []
+ for name, value in request.header:
+ if 'x-ms' in name:
+ x_ms_headers.append((name.lower(), value))
+ x_ms_headers.sort()
+ for name, value in x_ms_headers:
+ if value:
+ string_to_sign += ''.join([name, ':', value, '\n'])
+
+ #get account_name and uri path to sign
+ string_to_sign += '/' + account_name + uri_path
+
+ #get query string to sign if it is not table service
+ query_to_sign = request.query
+ query_to_sign.sort()
+
+    current_name = ''
+    for name, value in query_to_sign:
+        if value:
+            if current_name != name:
+                string_to_sign += '\n' + name + ':' + value
+                current_name = name
+            else:
+                string_to_sign += ',' + value
+
+ #sign the request
+ decode_account_key = base64.b64decode(account_key)
+ signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
+ auth_string = 'SharedKey ' + account_name + ':' + base64.b64encode(signed_hmac_sha256.digest())
+ return auth_string
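+
+# Shape of the blob string-to-sign assembled above (illustrative sketch, not
+# part of the original code), per the storage SharedKey scheme:
+#
+#   VERB + '\n'
+#   + one line per entry of headers_to_sign, in that order (blank if absent)
+#   + the sorted x-ms-* headers as 'name:value\n' lines
+#   + '/' + account_name + uri_path
+#   + '\n' + name + ':' + value for each non-empty query parameter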
+
+def _sign_storage_table_request(request, account_name, account_key):
+ uri_path = request.uri.split('?')[0]
+
+ string_to_sign = request.method + '\n'
+ headers_to_sign = ['content-md5', 'content-type', 'date']
+ for header in headers_to_sign:
+ for name, value in request.header:
+ if value and name.lower() == header:
+ string_to_sign += value + '\n'
+ break
+ else:
+ string_to_sign += '\n'
+
+ #get account_name and uri path to sign
+ string_to_sign += ''.join(['/', account_name, uri_path])
+
+ for name, value in request.query:
+ if name == 'comp' and uri_path == '/':
+ string_to_sign += '?comp=' + value
+ break
+
+ #sign the request
+ decode_account_key = base64.b64decode(account_key)
+ signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
+ auth_string = 'SharedKey ' + account_name + ':' + base64.b64encode(signed_hmac_sha256.digest())
+ return auth_string
+
+def convert_entity_to_xml(source, use_local_storage=True):
+
+    entity_body = '<m:properties>{properties}</m:properties>'
+
+ if type(source) is types.InstanceType or isinstance(source, WindowsAzureData):
+ source = vars(source)
+
+ properties_str = ''
+
+ for name, value in source.iteritems():
+ mtype = ''
+ if type(value) is types.IntType:
+ mtype = 'Edm.Int32'
+ elif type(value) is types.FloatType:
+ mtype = 'Edm.Double'
+ elif type(value) is types.BooleanType:
+ mtype = 'Edm.Boolean'
+ elif isinstance(value, datetime):
+ mtype = 'Edm.DateTime'
+ value = value.strftime('%Y-%m-%dT%H:%M:%S')
+ elif isinstance(value, EntityProperty):
+ mtype = value.type
+ value = value.value
+
+        #serialize each property as an OData element, with m:type when one was inferred
+        if mtype:
+            properties_str += ''.join(['<d:', name, ' m:type="', mtype, '">', str(value), '</d:', name, '>'])
+        else:
+            properties_str += ''.join(['<d:', name, '>', str(value), '</d:', name, '>'])
+
+ entity_body = entity_body.format(properties=properties_str)
+ xmlstr = create_entry(entity_body)
+ return xmlstr
+
+def convert_table_to_xml(table_name):
+ return convert_entity_to_xml({'TableName': table_name})
+
+def convert_block_list_to_xml(block_list):
+ if block_list is None:
+ return ''
+    xml = '<?xml version="1.0" encoding="utf-8"?><BlockList>'
+    for value in block_list.latest:
+        xml += '<Latest>%s</Latest>' % base64.b64encode(value)
+    for value in block_list.committed:
+        xml += '<Committed>%s</Committed>' % base64.b64encode(value)
+    for value in block_list.uncommitted:
+        xml += '<Uncommitted>%s</Uncommitted>' % base64.b64encode(value)
+    return xml + '</BlockList>'
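+
+# Illustrative output of convert_block_list_to_xml (a sketch, not part of the
+# original code): a BlockList whose `latest` list is ['block1', 'block2']
+# serializes, with base64-encoded block ids, to
+#
+#   <?xml version="1.0" encoding="utf-8"?>
+#   <BlockList><Latest>YmxvY2sx</Latest><Latest>YmxvY2sy</Latest></BlockList>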
+
+def convert_xml_to_block_list(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ blob_block_list = BlobBlockList()
+
+ xmldoc = minidom.parseString(xmlstr)
+
+ xml_committed_blocks_list = xmldoc.getElementsByTagName('CommittedBlocks')
+ for xml_committed_blocks in xml_committed_blocks_list:
+ xml_blocks = xml_committed_blocks.getElementsByTagName('Block')
+ for xml_block in xml_blocks:
+ xml_block_id = base64.b64decode(xml_block.getElementsByTagName('Name')[0].firstChild.nodeValue)
+ xml_block_size = int(xml_block.getElementsByTagName('Size')[0].firstChild.nodeValue)
+ blob_block_list.committed_blocks.append(BlobBlock(xml_block_id, xml_block_size))
+
+ xml_uncommitted_blocks_list = xmldoc.getElementsByTagName('UncommittedBlocks')
+ for xml_uncommitted_blocks in xml_uncommitted_blocks_list:
+ xml_blocks = xml_uncommitted_blocks.getElementsByTagName('Block')
+ for xml_block in xml_blocks:
+ xml_block_id = base64.b64decode(xml_block.getElementsByTagName('Name')[0].firstChild.nodeValue)
+ xml_block_size = int(xml_block.getElementsByTagName('Size')[0].firstChild.nodeValue)
+ blob_block_list.uncommitted_blocks.append(BlobBlock(xml_block_id, xml_block_size))
+
+ return blob_block_list
+
+def convert_xml_to_entity(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr)
+ xmldoc = minidom.parseString(xmlstr)
+ xml_properties = xmldoc.getElementsByTagName('properties')
+
+ if not xml_properties:
+ return None
+
+ entity = Entity()
+ for xml_property in xml_properties[0].childNodes:
+ if xml_property.firstChild:
+ name = xml_property.nodeName
+ if name in ['Timestamp']:
+ continue
+ value = xml_property.firstChild.nodeValue
+
+ isnull = xml_property.getAttribute('null')
+ mtype = xml_property.getAttribute('type')
+ property = EntityProperty()
+ if not isnull and not mtype:
+ setattr(entity, name, value)
+ else:
+ setattr(property, 'value', value)
+ if isnull:
+ setattr(property, 'isnull', str(isnull))
+ if mtype:
+ setattr(property, 'type', str(mtype))
+ setattr(entity, name, property)
+
+ return entity
+
+def convert_xml_to_table(xmlstr):
+ xmlstr = normalize_xml(xmlstr)
+ xmlstr = remove_xmltag_namespace(xmlstr)
+ table = Table()
+ entity = convert_xml_to_entity(xmlstr)
+ setattr(table, 'name', entity.TableName)
+ for name, value in get_entry_properties(xmlstr, ['updated', 'name']).iteritems():
+ setattr(table, name, value)
+ return table
+
+def _storage_error_handler(http_error):
+ if http_error.status == 409:
+ raise WindowsAzureError('Conflict')
+ elif http_error.status == 404:
+ raise WindowsAzureError('Not Found')
+ else:
+ raise WindowsAzureError('Unknown Error')
\ No newline at end of file
diff --git a/src/windowsazure/storage/cloudblobclient.py b/src/windowsazure/storage/cloudblobclient.py
new file mode 100644
index 000000000000..eaf781884d83
--- /dev/null
+++ b/src/windowsazure/storage/cloudblobclient.py
@@ -0,0 +1,764 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from windowsazure.storage import *
+from windowsazure.storage.storageclient import _StorageClient
+from windowsazure.storage import (_update_storage_blob_header,
+ convert_block_list_to_xml, convert_xml_to_block_list)
+from windowsazure import (validate_length, validate_values, validate_not_none, Feed, _Request,
+ convert_xml_to_feeds, to_right_type,
+ _get_request_body, _update_request_uri_query, get_host,
+ _dont_fail_on_exist, _dont_fail_not_exist, HTTPError,
+                          WindowsAzureError, _parse_response, convert_class_to_xml,
+ _parse_response_for_dict, _parse_response_for_dict_prefix,
+ _parse_response_for_dict_filter, _parse_response_for_dict_special,
+ BLOB_SERVICE, QUEUE_SERVICE, TABLE_SERVICE, SERVICE_BUS_SERVICE)
+
+class CloudBlobClient(_StorageClient):
+ '''
+ This is the main class managing Blob resources.
+ account_name: your storage account name, required for all operations.
+ account_key: your storage account key, required for all operations.
+ '''
+
+ def list_containers(self, prefix=None, marker=None, maxresults=None, include=None):
+ '''
+ The List Containers operation returns a list of the containers under the specified account.
+
+        prefix: Optional. Filters the results to return only containers whose name begins with
+            the specified prefix.
+ marker: Optional. A string value that identifies the portion of the list to be returned
+ with the next list operation.
+ maxresults: Optional. Specifies the maximum number of containers to return.
+ include: Optional. Include this parameter to specify that the container's metadata be
+ returned as part of the response body.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?comp=list'
+ request.query = [
+ ('prefix', to_right_type(prefix)),
+ ('marker', to_right_type(marker)),
+ ('maxresults', to_right_type(maxresults)),
+ ('include', to_right_type(include))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, ContainerEnumResults)
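+
+    # Usage sketch (illustrative only, not part of the original SDK; it assumes
+    # the inherited _StorageClient constructor accepts account_name and
+    # account_key keyword arguments):
+    #
+    #   client = CloudBlobClient(account_name='myaccount', account_key='<key>')
+    #   for container in client.list_containers(prefix='my'):
+    #       print container.name, container.properties.last_modified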
+
+ def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob_public_access=None, fail_on_exist=False):
+ '''
+ Creates a new container under the specified account. If the container with the same name
+ already exists, the operation fails.
+
+ x_ms_meta_name_values: Optional. A dict with name_value pairs to associate with the
+ container as metadata. Example:{'Category':'test'}
+ x_ms_blob_public_access: Optional. Possible values include: container, blob.
+        fail_on_exist: specify whether to throw an exception when the container exists.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container'
+ request.header = [
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-blob-public-access', to_right_type(x_ms_blob_public_access))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_container_properties(self, container_name):
+ '''
+ Returns all user-defined metadata and system properties for the specified container.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def get_container_metadata(self, container_name):
+ '''
+        Returns all user-defined metadata for the specified container. The metadata
+        is returned in the response dictionary under keys of the form 'x-ms-meta-(name)'.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container&comp=metadata'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
+ '''
+ Sets one or more user-defined name-value pairs for the specified container.
+
+ x_ms_meta_name_values: A dict containing name, value for metadata. Example: {'category':'test'}
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container&comp=metadata'
+ request.header = [('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def get_container_acl(self, container_name):
+ '''
+ Gets the permissions for the specified container.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container&comp=acl'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, SignedIdentifiers)
+
+ def set_container_acl(self, container_name, signed_identifiers=None, x_ms_blob_public_access=None):
+ '''
+ Sets the permissions for the specified container.
+
+ x_ms_blob_public_access: Optional. Possible values include 'container' and 'blob'.
+        signed_identifiers: SignedIdentifiers instance
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container&comp=acl'
+ request.header = [('x-ms-blob-public-access', to_right_type(x_ms_blob_public_access))]
+ request.body = _get_request_body(convert_class_to_xml(signed_identifiers))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def delete_container(self, container_name, fail_not_exist=False):
+ '''
+ Marks the specified container for deletion.
+
+        fail_not_exist: specify whether to throw an exception when the container doesn't exist.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def list_blobs(self, container_name):
+ '''
+ Returns the list of blobs under the specified container.
+ '''
+ validate_not_none('container-name', container_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '?restype=container&comp=list'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, BlobEnumResults)
+
+ def set_blob_service_properties(self, storage_service_properties, timeout=None):
+ '''
+ Sets the properties of a storage account's Blob service, including Windows Azure
+ Storage Analytics. You can also use this operation to set the default request
+ version for all incoming requests that do not have a version specified.
+
+ storage_service_properties: a StorageServiceProperties object.
+ timeout: Optional. The timeout parameter is expressed in seconds. For example, the
+ following value sets a timeout of 30 seconds for the request: timeout=30.
+ '''
+ validate_not_none('class:storage_service_properties', storage_service_properties)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.body = _get_request_body(convert_class_to_xml(storage_service_properties))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def get_blob_service_properties(self, timeout=None):
+ '''
+ Gets the properties of a storage account's Blob service, including Windows Azure
+ Storage Analytics.
+
+ timeout: Optional. The timeout parameter is expressed in seconds. For example, the
+ following value sets a timeout of 30 seconds for the request: timeout=30.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, StorageServiceProperties)
+
+ def get_blob_properties(self, container_name, blob_name, x_ms_lease_id=None):
+ '''
+ Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
+
+ x_ms_lease_id: Required if the blob has an active lease.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'HEAD'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + ''
+ request.header = [('x-ms-lease-id', to_right_type(x_ms_lease_id))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_md5=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_lease_id=None):
+ '''
+ Sets system properties on the blob.
+
+ x_ms_blob_cache_control: Optional. Modifies the cache control string for the blob.
+ x_ms_blob_content_type: Optional. Sets the blob's content type.
+ x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
+ x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
+ x_ms_blob_content_language: Optional. Sets the blob's content language.
+ x_ms_lease_id: Required if the blob has an active lease.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=properties'
+ request.header = [
+ ('x-ms-blob-cache-control', to_right_type(x_ms_blob_cache_control)),
+ ('x-ms-blob-content-type', to_right_type(x_ms_blob_content_type)),
+ ('x-ms-blob-content-md5', to_right_type(x_ms_blob_content_md5)),
+ ('x-ms-blob-content-encoding', to_right_type(x_ms_blob_content_encoding)),
+ ('x-ms-blob-content-language', to_right_type(x_ms_blob_content_language)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_encoding=None, content_language=None, content_m_d5=None, cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_blob_cache_control=None, x_ms_meta_name_values=None, x_ms_lease_id=None, x_ms_blob_content_length=None, x_ms_blob_sequence_number=None):
+ '''
+ Creates a new block blob or page blob, or updates the content of an existing block blob.
+
+ container_name: the name of container to put the blob
+ blob_name: the name of blob
+ x_ms_blob_type: Required. Could be BlockBlob or PageBlob
+ x_ms_meta_name_values: A dict containing name, value for metadata.
+ x_ms_lease_id: Required if the blob has an active lease.
+ blob: the content of blob.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('binary:blob', blob)
+ validate_not_none('x-ms-blob-type', x_ms_blob_type)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + ''
+ request.header = [
+ ('x-ms-blob-type', to_right_type(x_ms_blob_type)),
+ ('Content-Encoding', to_right_type(content_encoding)),
+ ('Content-Language', to_right_type(content_language)),
+ ('Content-MD5', to_right_type(content_m_d5)),
+ ('Cache-Control', to_right_type(cache_control)),
+ ('x-ms-blob-content-type', to_right_type(x_ms_blob_content_type)),
+ ('x-ms-blob-content-encoding', to_right_type(x_ms_blob_content_encoding)),
+ ('x-ms-blob-content-language', to_right_type(x_ms_blob_content_language)),
+ ('x-ms-blob-content-md5', to_right_type(x_ms_blob_content_md5)),
+ ('x-ms-blob-cache-control', to_right_type(x_ms_blob_cache_control)),
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-blob-content-length', to_right_type(x_ms_blob_content_length)),
+ ('x-ms-blob-sequence-number', to_right_type(x_ms_blob_sequence_number))
+ ]
+ request.body = _get_request_body(blob)
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_ms_lease_id=None, x_ms_range_get_content_md5=None):
+ '''
+ Reads or downloads a blob from the system, including its metadata and properties.
+
+ container_name: the name of container to get the blob
+ blob_name: the name of blob
+ x_ms_range: Optional. Return only the bytes of the blob in the specified range.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + ''
+ request.header = [
+ ('x-ms-range', to_right_type(x_ms_range)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-range-get-content-md5', to_right_type(x_ms_range_get_content_md5))
+ ]
+ request.query = [('snapshot', to_right_type(snapshot))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return respbody
+
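+    # Round-trip sketch for put_blob/get_blob above (illustrative only, not part
+    # of the original SDK; `client` as in the earlier sketch, and the container
+    # is assumed to exist):
+    #
+    #   client.put_blob('snippets', 'hello.txt', 'Hello, blob!', 'BlockBlob')
+    #   data = client.get_blob('snippets', 'hello.txt')   # raw response body
+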
+ def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease_id=None):
+ '''
+ Returns all user-defined metadata for the specified blob or snapshot.
+
+ container_name: the name of container containing the blob.
+ blob_name: the name of blob to get metadata.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=metadata'
+ request.header = [('x-ms-lease-id', to_right_type(x_ms_lease_id))]
+ request.query = [('snapshot', to_right_type(snapshot))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def set_blob_metadata(self, container_name, blob_name, x_ms_meta_name_values=None, x_ms_lease_id=None):
+ '''
+ Sets user-defined metadata for the specified blob as one or more name-value pairs.
+
+ container_name: the name of container containing the blob
+ blob_name: the name of blob
+ x_ms_meta_name_values: Dict containing name and value pairs.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=metadata'
+ request.header = [
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def lease_blob(self, container_name, blob_name, x_ms_lease_action, x_ms_lease_id=None):
+ '''
+ Establishes and manages a one-minute lock on a blob for write operations.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ x_ms_lease_id: Any GUID format string
+ x_ms_lease_action: Required. Possible values: acquire|renew|release|break
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('x-ms-lease-action', x_ms_lease_action)
+ validate_values('x-ms-lease-action', to_right_type(x_ms_lease_action), 'acquire|renew|release|break')
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=lease'
+ request.header = [
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-lease-action', to_right_type(x_ms_lease_action))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict_filter(self, filter=['x-ms-lease-id'])
+
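+    # Illustrative lease workflow (sketch, not part of the original change): assumes
+    # 'blob_client' is a configured CloudBlobClient and that the returned dict exposes
+    # the lease under the 'x-ms-lease-id' key, as suggested by the filter above.
+    #
+    #   lease = blob_client.lease_blob('mycontainer', 'myblob', 'acquire')
+    #   lease_id = lease['x-ms-lease-id']
+    #   blob_client.lease_blob('mycontainer', 'myblob', 'release', x_ms_lease_id=lease_id)
+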
+ def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None):
+ '''
+ Creates a read-only snapshot of a blob.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ x_ms_meta_name_values: Optional. Dict containing name and value pairs.
+ if_modified_since: Optional. Datetime string.
+        if_unmodified_since: Optional. DateTime string.
+        if_match: Optional. An ETag value. Snapshot the blob only if its ETag matches the value specified.
+ if_none_match: Optional. An ETag value
+ x_ms_lease_id: Optional. If this header is specified, the operation will be performed
+ only if both of the following conditions are met.
+ 1. The blob's lease is currently active
+ 2. The lease ID specified in the request matches that of the blob.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=snapshot'
+ request.header = [
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('If-Modified-Since', to_right_type(if_modified_since)),
+ ('If-Unmodified-Since', to_right_type(if_unmodified_since)),
+ ('If-Match', to_right_type(if_match)),
+ ('If-None-Match', to_right_type(if_none_match)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_values=None, x_ms_source_if_modified_since=None, x_ms_source_if_unmodified_since=None, x_ms_source_if_match=None, x_ms_source_if_none_match=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None, x_ms_source_lease_id=None):
+ '''
+ Copies a blob to a destination within the storage account.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ x_ms_copy_source: the blob to be copied. Should be absolute path format.
+ x_ms_meta_name_values: Optional. Dict containing name and value pairs.
+        x_ms_source_if_modified_since: Optional. A DateTime value. Specify this conditional
+            header to copy the blob only if the source blob has been modified since the
+            specified date/time.
+        x_ms_source_if_unmodified_since: Optional. A DateTime value. Specify this conditional
+            header to copy the blob only if the source blob has not been modified since the
+            specified date/time.
+        x_ms_source_if_match: Optional. An ETag value. Specify this conditional header to copy
+            the source blob only if its ETag matches the value specified.
+        x_ms_source_if_none_match: Optional. An ETag value. Specify this conditional header to
+            copy the blob only if its ETag does not match the value specified.
+        if_modified_since: Optional. Datetime string.
+        if_unmodified_since: Optional. DateTime string.
+        if_match: Optional. An ETag value. Copy the blob only if the destination blob's ETag
+            matches the value specified.
+        if_none_match: Optional. An ETag value.
+        x_ms_lease_id: Optional. If this header is specified, the operation will be performed
+            only if both of the following conditions are met.
+            1. The blob's lease is currently active
+            2. The lease ID specified in the request matches that of the blob.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('x-ms-copy-source', x_ms_copy_source)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + ''
+ request.header = [
+ ('x-ms-copy-source', to_right_type(x_ms_copy_source)),
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-source-if-modified-since', to_right_type(x_ms_source_if_modified_since)),
+ ('x-ms-source-if-unmodified-since', to_right_type(x_ms_source_if_unmodified_since)),
+ ('x-ms-source-if-match', to_right_type(x_ms_source_if_match)),
+ ('x-ms-source-if-none-match', to_right_type(x_ms_source_if_none_match)),
+ ('If-Modified-Since', to_right_type(if_modified_since)),
+ ('If-Unmodified-Since', to_right_type(if_unmodified_since)),
+ ('If-Match', to_right_type(if_match)),
+ ('If-None-Match', to_right_type(if_none_match)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-source-lease-id', to_right_type(x_ms_source_lease_id))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
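+    # Illustrative copy (sketch, not part of the original change): the source is given in
+    # the absolute path format described above; 'myaccount' and the container and blob
+    # names are placeholders for a configured CloudBlobClient named 'blob_client'.
+    #
+    #   blob_client.copy_blob('destcontainer', 'destblob',
+    #                         '/myaccount/srccontainer/srcblob')
+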
+ def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_copy_source=None, x_ms_meta_name_values=None, x_ms_source_if_modified_since=None, x_ms_source_if_unmodified_since=None, x_ms_source_if_match=None, x_ms_source_if_none_match=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None, x_ms_source_lease_id=None):
+ '''
+ Marks the specified blob or snapshot for deletion. The blob is later deleted
+ during garbage collection.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+        snapshot: Optional. An opaque DateTime value that, when present, specifies
+            the blob snapshot to delete.
+        x_ms_lease_id: Optional. If this header is specified, the operation will be performed
+            only if both of the following conditions are met.
+            1. The blob's lease is currently active
+            2. The lease ID specified in the request matches that of the blob.
+        if_modified_since: Optional. Datetime string.
+        if_unmodified_since: Optional. DateTime string.
+        if_match: Optional. An ETag value. Delete the blob only if its ETag matches the value specified.
+        if_none_match: Optional. An ETag value.
+        x_ms_copy_source, x_ms_meta_name_values and the x_ms_source_* conditional headers take
+            the same values as described for copy_blob.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + ''
+ request.header = [
+ ('x-ms-copy-source', to_right_type(x_ms_copy_source)),
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-source-if-modified-since', to_right_type(x_ms_source_if_modified_since)),
+ ('x-ms-source-if-unmodified-since', to_right_type(x_ms_source_if_unmodified_since)),
+ ('x-ms-source-if-match', to_right_type(x_ms_source_if_match)),
+ ('x-ms-source-if-none-match', to_right_type(x_ms_source_if_none_match)),
+ ('If-Modified-Since', to_right_type(if_modified_since)),
+ ('If-Unmodified-Since', to_right_type(if_unmodified_since)),
+ ('If-Match', to_right_type(if_match)),
+ ('If-None-Match', to_right_type(if_none_match)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-source-lease-id', to_right_type(x_ms_source_lease_id))
+ ]
+ request.query = [('snapshot', to_right_type(snapshot))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+    def put_block(self, container_name, blob_name, block, blockid, content_md5=None, x_ms_lease_id=None):
+ '''
+ Creates a new block to be committed as part of a blob.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
+ the integrity of the blob during transport. When this header is specified,
+ the storage service checks the hash that has arrived with the one that was sent.
+ x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
+ a blob with an active lease, specify the valid lease ID for this header.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('binary:block', block)
+ validate_not_none('blockid', blockid)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=block'
+ request.header = [
+            ('Content-MD5', to_right_type(content_md5)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.query = [('blockid', base64.b64encode(to_right_type(blockid)))]
+ request.body = _get_request_body(block)
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+    def put_block_list(self, container_name, blob_name, block_list, content_md5=None, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_meta_name_values=None, x_ms_lease_id=None):
+ '''
+ Writes a blob by specifying the list of block IDs that make up the blob. In order to
+ be written as part of a blob, a block must have been successfully written to the server
+ in a prior Put Block (REST API) operation.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ x_ms_meta_name_values: Optional. Dict containing name and value pairs.
+ x_ms_blob_cache_control: Optional. Sets the blob's cache control. If specified, this
+ property is stored with the blob and returned with a read request.
+ x_ms_blob_content_type: Optional. Sets the blob's content type. If specified, this
+ property is stored with the blob and returned with a read request.
+ x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. If specified,
+ this property is stored with the blob and returned with a read request.
+ x_ms_blob_content_language: Optional. Set the blob's content language. If specified,
+ this property is stored with the blob and returned with a read request.
+ x_ms_blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash
+ is not validated, as the hashes for the individual blocks were validated when
+ each was uploaded.
+ content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
+ the integrity of the blob during transport. When this header is specified,
+ the storage service checks the hash that has arrived with the one that was sent.
+ x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
+ a blob with an active lease, specify the valid lease ID for this header.
+ x-ms-meta-name-values: a dict containing name, value for metadata.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('class:block_list', block_list)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=blocklist'
+ request.header = [
+            ('Content-MD5', to_right_type(content_md5)),
+ ('x-ms-blob-cache-control', to_right_type(x_ms_blob_cache_control)),
+ ('x-ms-blob-content-type', to_right_type(x_ms_blob_content_type)),
+ ('x-ms-blob-content-encoding', to_right_type(x_ms_blob_content_encoding)),
+ ('x-ms-blob-content-language', to_right_type(x_ms_blob_content_language)),
+ ('x-ms-blob-content-md5', to_right_type(x_ms_blob_content_md5)),
+ ('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.body = _get_request_body(convert_block_list_to_xml(block_list))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
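+    # Illustrative block-blob upload (sketch, not part of the original change): upload two
+    # blocks, then commit them. Assumes 'blob_client' is a configured CloudBlobClient and
+    # that block_list accepts a plain list of the block ids previously passed to put_block
+    # (the exact structure is defined by convert_block_list_to_xml elsewhere in this change).
+    #
+    #   blob_client.put_block('mycontainer', 'myblob', 'hello ', 'block-000')
+    #   blob_client.put_block('mycontainer', 'myblob', 'world',  'block-001')
+    #   blob_client.put_block_list('mycontainer', 'myblob', ['block-000', 'block-001'])
+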
+ def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype=None, x_ms_lease_id=None):
+ '''
+ Retrieves the list of blocks that have been uploaded as part of a block blob.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ snapshot: Optional. Datetime to determine the time to retrieve the blocks.
+ blocklisttype: Specifies whether to return the list of committed blocks, the
+ list of uncommitted blocks, or both lists together. Valid values are
+ committed, uncommitted, or all.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=blocklist'
+ request.header = [('x-ms-lease-id', to_right_type(x_ms_lease_id))]
+ request.query = [
+ ('snapshot', to_right_type(snapshot)),
+ ('blocklisttype', to_right_type(blocklisttype))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_block_list(respbody)
+
+    def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write, content_md5=None, x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, x_ms_if_sequence_number_lt=None, x_ms_if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None):
+ '''
+ Writes a range of pages to a page blob.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+ x_ms_range: Required. Specifies the range of bytes to be written as a page. Both the start
+ and end of the range must be specified. Must be in format: bytes=startByte-endByte.
+ Given that pages must be aligned with 512-byte boundaries, the start offset must be
+ a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
+ byte ranges are 0-511, 512-1023, etc.
+ x_ms_page_write: Required. You may specify one of the following options:
+ 1. Update: Writes the bytes specified by the request body into the specified range.
+ The Range and Content-Length headers must match to perform the update.
+ 2. Clear: Clears the specified range and releases the space used in storage for
+ that range. To clear a range, set the Content-Length header to zero, and the Range
+ header to a value that indicates the range to clear, up to maximum blob size.
+ x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
+ with an active lease, specify the valid lease ID for this header.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ validate_not_none('binary:page', page)
+ validate_not_none('x-ms-range', x_ms_range)
+ validate_not_none('x-ms-page-write', x_ms_page_write)
+ validate_values('x-ms-page-write', to_right_type(x_ms_page_write), 'update|clear')
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=page'
+ request.header = [
+ ('x-ms-range', to_right_type(x_ms_range)),
+            ('Content-MD5', to_right_type(content_md5)),
+ ('x-ms-page-write', to_right_type(x_ms_page_write)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id)),
+ ('x-ms-if-sequence-number-lte', to_right_type(x_ms_if_sequence_number_lte)),
+ ('x-ms-if-sequence-number-lt', to_right_type(x_ms_if_sequence_number_lt)),
+ ('x-ms-if-sequence-number-eq', to_right_type(x_ms_if_sequence_number_eq)),
+ ('If-Modified-Since', to_right_type(if_modified_since)),
+ ('If-Unmodified-Since', to_right_type(if_unmodified_since)),
+ ('If-Match', to_right_type(if_match)),
+ ('If-None-Match', to_right_type(if_none_match))
+ ]
+ request.body = _get_request_body(page)
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
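+    # Illustrative page write (sketch, not part of the original change): pages are written
+    # in 512-byte aligned ranges, so a single page covers bytes=0-511. Assumes 'blob_client'
+    # is a configured CloudBlobClient and that the target blob was already created as a page
+    # blob with a large enough x-ms-blob-content-length.
+    #
+    #   page = 'x' * 512
+    #   blob_client.put_page('mycontainer', 'mypageblob', page, 'bytes=0-511', 'update')
+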
+ def get_page_ranges(self, container_name, blob_name, snapshot=None, range=None, x_ms_range=None, x_ms_lease_id=None):
+ '''
+ Retrieves the page ranges for a blob.
+
+ container_name: the name of container.
+ blob_name: the name of blob
+        range: Optional. Specifies the range of bytes over which to list ranges, inclusively.
+            If omitted, then all ranges for the blob are returned.
+            Must be in format: bytes=startByte-endByte.
+        x_ms_range: Optional. Same as the range parameter, sent as the x-ms-range header.
+ x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
+ with an active lease, specify the valid lease ID for this header.
+ '''
+ validate_not_none('container-name', container_name)
+ validate_not_none('blob-name', blob_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(BLOB_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(container_name) + '/' + to_right_type(blob_name) + '?comp=pagelist'
+ request.header = [
+ ('Range', to_right_type(range)),
+ ('x-ms-range', to_right_type(x_ms_range)),
+ ('x-ms-lease-id', to_right_type(x_ms_lease_id))
+ ]
+ request.query = [('snapshot', to_right_type(snapshot))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_blob_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, PageList)
+
+
diff --git a/src/windowsazure/storage/cloudqueueclient.py b/src/windowsazure/storage/cloudqueueclient.py
new file mode 100644
index 000000000000..47c26274c957
--- /dev/null
+++ b/src/windowsazure/storage/cloudqueueclient.py
@@ -0,0 +1,333 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from windowsazure.storage import *
+from windowsazure.storage.storageclient import _StorageClient
+from windowsazure.storage import (_update_storage_queue_header)
+from windowsazure import (validate_length, validate_values, validate_not_none, Feed, _Request,
+ convert_xml_to_feeds, to_right_type,
+ _get_request_body, _update_request_uri_query, get_host,
+ _dont_fail_on_exist, _dont_fail_not_exist, HTTPError,
+                          WindowsAzureError, _parse_response, convert_class_to_xml,
+ _parse_response_for_dict, _parse_response_for_dict_prefix,
+ _parse_response_for_dict_filter, _parse_response_for_dict_special,
+ BLOB_SERVICE, QUEUE_SERVICE, TABLE_SERVICE, SERVICE_BUS_SERVICE)
+
+class CloudQueueClient(_StorageClient):
+ '''
+    This is the main class managing Queue resources.
+ account_name: your storage account name, required for all operations.
+ account_key: your storage account key, required for all operations.
+ '''
+
+ def get_queue_service_properties(self, timeout=None):
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, StorageServiceProperties)
+
+ def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?comp=list'
+ request.query = [
+ ('prefix', to_right_type(prefix)),
+ ('marker', to_right_type(marker)),
+ ('maxresults', to_right_type(maxresults)),
+ ('include', to_right_type(include))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, QueueEnumResults)
+
+ def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=False):
+ '''
+ Creates a queue under the given account.
+
+ queue_name: name of the queue.
+ x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
+ with the queue as metadata.
+        fail_on_exist: specify whether to throw an exception when the queue exists.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + ''
+ request.header = [('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
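+    # Illustrative usage (sketch, not part of the original change): with the default
+    # fail_on_exist=False, an already-existing queue is reported by the False return value
+    # rather than by an exception; 'queue_client' is an assumed, configured CloudQueueClient.
+    #
+    #   if queue_client.create_queue('taskqueue'):
+    #       pass  # queue was created
+    #   else:
+    #       pass  # queue already existed
+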
+ def delete_queue(self, queue_name, fail_not_exist=False):
+ '''
+ Permanently deletes the specified queue.
+
+ queue_name: name of the queue.
+        fail_not_exist: specify whether to throw an exception when the queue doesn't exist.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + ''
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_queue_metadata(self, queue_name):
+ '''
+ Retrieves user-defined metadata and queue properties on the specified queue.
+ Metadata is associated with the queue as name-values pairs.
+
+ queue_name: name of the queue.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '?comp=metadata'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
+ '''
+ Sets user-defined metadata on the specified queue. Metadata is associated
+ with the queue as name-value pairs.
+
+ queue_name: name of the queue.
+ x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
+ with the queue as metadata.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '?comp=metadata'
+ request.header = [('x-ms-meta-name-values', to_right_type(x_ms_meta_name_values))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def put_message(self, queue_name, message_text, visibilitytimeout=None, messagettl=None):
+ '''
+ Adds a new message to the back of the message queue. A visibility timeout can
+ also be specified to make the message invisible until the visibility timeout
+ expires. A message must be in a format that can be included in an XML request
+ with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
+ 2011-08-18 and newer, or 8KB in size for previous versions.
+
+        queue_name: name of the queue.
+        message_text: the content of the message.
+ visibilitytimeout: Optional. If specified, the request must be made using an
+ x-ms-version of 2011-08-18 or newer.
+ messagettl: Optional. Specifies the time-to-live interval for the message,
+ in seconds. The maximum time-to-live allowed is 7 days. If this parameter
+ is omitted, the default time-to-live is 7 days.
+ '''
+ validate_not_none('queue-name', queue_name)
+ validate_not_none('MessageText', message_text)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages'
+ request.query = [
+ ('visibilitytimeout', to_right_type(visibilitytimeout)),
+ ('messagettl', to_right_type(messagettl))
+ ]
+        request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
+<QueueMessage> \
+    <MessageText>' + to_right_type(message_text) + '</MessageText> \
+</QueueMessage>')
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
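+    # Illustrative usage (sketch, not part of the original change): enqueue a message that
+    # expires after an hour; 'queue_client' is an assumed, configured CloudQueueClient.
+    #
+    #   queue_client.put_message('taskqueue', 'hello world', messagettl=3600)
+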
+ def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
+ '''
+ Retrieves one or more messages from the front of the queue.
+
+ queue_name: name of the queue.
+ numofmessages: Optional. A nonzero integer value that specifies the number of
+ messages to retrieve from the queue, up to a maximum of 32. If fewer are
+ visible, the visible messages are returned. By default, a single message
+ is retrieved from the queue with this operation.
+ visibilitytimeout: Required. Specifies the new visibility timeout value, in
+ seconds, relative to server time. The new value must be larger than or
+ equal to 1 second, and cannot be larger than 7 days, or larger than 2
+ hours on REST protocol versions prior to version 2011-08-18. The visibility
+ timeout of a message can be set to a value later than the expiry time.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages'
+ request.query = [
+ ('numofmessages', to_right_type(numofmessages)),
+ ('visibilitytimeout', to_right_type(visibilitytimeout))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, QueueMessageList)
+
+ def peek_messages(self, queue_name, numofmessages=None):
+ '''
+ Retrieves one or more messages from the front of the queue, but does not alter
+ the visibility of the message.
+
+ queue_name: name of the queue.
+ numofmessages: Optional. A nonzero integer value that specifies the number of
+ messages to peek from the queue, up to a maximum of 32. By default,
+ a single message is peeked from the queue with this operation.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages?peekonly=true'
+ request.query = [('numofmessages', to_right_type(numofmessages))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, QueueMessageList)
+
+ def delete_message(self, queue_name, message_id, popreceipt):
+ '''
+ Deletes the specified message.
+
+ queue_name: name of the queue.
+ popreceipt: Required. A valid pop receipt value returned from an earlier call
+ to the Get Messages or Update Message operation.
+ '''
+ validate_not_none('queue-name', queue_name)
+ validate_not_none('message-id', message_id)
+ validate_not_none('popreceipt', popreceipt)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/' + to_right_type(message_id) + ''
+ request.query = [('popreceipt', to_right_type(popreceipt))]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
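+    # Illustrative dequeue loop (sketch, not part of the original change): retrieve a batch
+    # of messages, process them, then delete each one with its pop receipt. Assumes
+    # 'queue_client' is a configured CloudQueueClient and that the parsed QueueMessageList
+    # is iterable and exposes the id, pop receipt and text fields of each message (the
+    # attribute names below depend on the QueueMessageList definition elsewhere in this change).
+    #
+    #   messages = queue_client.get_messages('taskqueue', numofmessages=5, visibilitytimeout=30)
+    #   for message in messages:
+    #       process(message.message_text)
+    #       queue_client.delete_message('taskqueue', message.message_id, message.pop_receipt)
+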
+ def clear_messages(self, queue_name):
+ '''
+ Deletes all messages from the specified queue.
+
+ queue_name: name of the queue.
+ '''
+ validate_not_none('queue-name', queue_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def update_message(self, queue_name, message_id, message_text, popreceipt, visibilitytimeout):
+ '''
+ Updates the visibility timeout of a message. You can also use this
+ operation to update the contents of a message.
+
+ queue_name: name of the queue.
+ popreceipt: Required. A valid pop receipt value returned from an earlier call
+ to the Get Messages or Update Message operation.
+ visibilitytimeout: Required. Specifies the new visibility timeout value, in
+ seconds, relative to server time. The new value must be larger than or
+ equal to 0, and cannot be larger than 7 days. The visibility timeout
+ of a message cannot be set to a value later than the expiry time. A
+ message can be updated until it has been deleted or has expired.
+ '''
+ validate_not_none('queue-name', queue_name)
+ validate_not_none('message-id', message_id)
+ validate_not_none('MessageText', message_text)
+ validate_not_none('popreceipt', popreceipt)
+ validate_not_none('visibilitytimeout', visibilitytimeout)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(queue_name) + '/messages/' + to_right_type(message_id) + ''
+ request.query = [
+ ('popreceipt', to_right_type(popreceipt)),
+ ('visibilitytimeout', to_right_type(visibilitytimeout))
+ ]
+        request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
+<QueueMessage> \
+    <MessageText>' + to_right_type(message_text) + '</MessageText> \
+</QueueMessage>')
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict_filter(self, filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])
+
+ def set_queue_service_properties(self, storage_service_properties, timeout=None):
+ '''
+ Sets the properties of a storage account's Queue service, including Windows Azure
+ Storage Analytics.
+
+ storage_service_properties: a StorageServiceProperties object.
+ timeout: Optional. The timeout parameter is expressed in seconds.
+ '''
+ validate_not_none('class:storage_service_properties', storage_service_properties)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(QUEUE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.query = [('timeout', to_right_type(timeout))]
+ request.body = _get_request_body(convert_class_to_xml(storage_service_properties))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_queue_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+
diff --git a/src/windowsazure/storage/cloudstorageaccount.py b/src/windowsazure/storage/cloudstorageaccount.py
new file mode 100644
index 000000000000..7f2a5463fbbe
--- /dev/null
+++ b/src/windowsazure/storage/cloudstorageaccount.py
@@ -0,0 +1,30 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+from windowsazure.storage.cloudblobclient import CloudBlobClient
+from windowsazure.storage.cloudtableclient import CloudTableClient
+from windowsazure.storage.cloudqueueclient import CloudQueueClient
+
+class CloudStorageAccount:
+
+ def __init__(self, account_name=None, account_key=None):
+ self.account_name = account_name
+ self.account_key = account_key
+
+ def create_blob_client(self):
+ return CloudBlobClient(self.account_name, self.account_key)
+
+ def create_table_client(self):
+ return CloudTableClient(self.account_name, self.account_key)
+
+ def create_queue_client(self):
+ return CloudQueueClient(self.account_name, self.account_key)
\ No newline at end of file
diff --git a/src/windowsazure/storage/cloudtableclient.py b/src/windowsazure/storage/cloudtableclient.py
new file mode 100644
index 000000000000..eb60c3c59999
--- /dev/null
+++ b/src/windowsazure/storage/cloudtableclient.py
@@ -0,0 +1,359 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from windowsazure.storage import *
+from windowsazure.storage.storageclient import _StorageClient
+from windowsazure.storage import (_update_storage_table_header,
+ convert_table_to_xml, convert_xml_to_table,
+ convert_entity_to_xml, convert_xml_to_entity)
+from windowsazure.http.batchclient import _BatchClient
+from windowsazure import (validate_length, validate_values, validate_not_none, Feed, _Request,
+ convert_xml_to_feeds, to_right_type,
+ _get_request_body, _update_request_uri_query, get_host,
+ _dont_fail_on_exist, _dont_fail_not_exist, HTTPError,
+                          WindowsAzureError, _parse_response, convert_class_to_xml,
+ _parse_response_for_dict, _parse_response_for_dict_prefix,
+ _parse_response_for_dict_filter, _parse_response_for_dict_special,
+ BLOB_SERVICE, QUEUE_SERVICE, TABLE_SERVICE, SERVICE_BUS_SERVICE)
+
+class CloudTableClient(_StorageClient):
+ '''
+ This is the main class managing Table resources.
+ account_name: your storage account name, required for all operations.
+ account_key: your storage account key, required for all operations.
+ '''
+
+ def begin_batch(self):
+ if self._batchclient is None:
+ self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)
+ return self._batchclient.begin_batch()
+
+ def commit_batch(self):
+ try:
+ ret = self._batchclient.commit_batch()
+ finally:
+ self._batchclient = None
+ return ret
+
+ def cancel_batch(self):
+ self._batchclient = None
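+
+    # Illustrative batch usage (sketch, not part of the original change): operations issued
+    # between begin_batch() and commit_batch() are intended to be collected by the
+    # _BatchClient and sent as a single batch; cancel_batch() discards the pending batch.
+    # 'table_client' is an assumed, configured CloudTableClient and the dict-form entities
+    # below are placeholders.
+    #
+    #   table_client.begin_batch()
+    #   table_client.insert_entity('tasktable', {'PartitionKey': 'p1', 'RowKey': '1', 'text': 'hello'})
+    #   table_client.insert_entity('tasktable', {'PartitionKey': 'p1', 'RowKey': '2', 'text': 'world'})
+    #   table_client.commit_batch()
+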
+ def get_table_service_properties(self):
+ '''
+ Gets the properties of a storage account's Table service, including Windows Azure
+ Storage Analytics.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return self._parse_response(respbody, StorageServiceProperties)
+
+ def set_table_service_properties(self, storage_service_properties):
+ '''
+ Sets the properties of a storage account's Table Service, including Windows Azure Storage Analytics.
+
+ storage_service_properties: a StorageServiceProperties object.
+ '''
+ validate_not_none('class:storage_service_properties', storage_service_properties)
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/?restype=service&comp=properties'
+ request.body = _get_request_body(convert_class_to_xml(storage_service_properties))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return _parse_response_for_dict(self)
+
+ def query_tables(self):
+ '''
+ Returns a list of tables under the specified account.
+ '''
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/Tables'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_table)
+
+ def create_table(self, table, fail_on_exist=False):
+ '''
+ Creates a new table in the storage account.
+
+ table: name of the table to create.
+        fail_on_exist: specify whether to throw an exception when the table exists.
+ '''
+ validate_not_none('feed:table', table)
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/Tables'
+ request.body = _get_request_body(convert_table_to_xml(table))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ if not fail_on_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_on_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def delete_table(self, table_name, fail_not_exist=False):
+ '''
+ table_name: name of the table to delete.
+
+        fail_not_exist: specify whether to throw an exception when the table doesn't exist.
+ '''
+ validate_not_none('table-name', table_name)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/Tables(\'' + to_right_type(table_name) + '\')'
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ if not fail_not_exist:
+ try:
+ self._perform_request(request)
+ return True
+ except WindowsAzureError as e:
+ _dont_fail_not_exist(e)
+ return False
+ else:
+ self._perform_request(request)
+ return True
+
+ def get_entity(self, table_name, partition_key, row_key, comma_separated_property_names=''):
+ '''
+        Gets an entity from a table, and includes the $select option.
+
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ comma_separated_property_names: the property names to select.
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('comma-separated-property-names', comma_separated_property_names)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')?$select=' + to_right_type(comma_separated_property_names) + ''
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_entity(respbody)
+
+ def query_entities(self, table_name, query_expression='', comma_separated_property_names=''):
+ '''
+        Gets entities from a table, and includes the $filter and $select options.
+
+ query_expression: the query to get entities.
+ comma_separated_property_names: the property names to select.
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('query-expression', query_expression)
+ validate_not_none('comma-separated-property-names', comma_separated_property_names)
+ request = _Request()
+ request.method = 'GET'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '()?$filter=' + to_right_type(query_expression) + '&$select=' + to_right_type(comma_separated_property_names) + ''
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ return convert_xml_to_feeds(respbody, convert_xml_to_entity)
+
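+    # Illustrative query (sketch, not part of the original change): the filter uses OData
+    # query syntax and the select list is comma separated; 'table_client' is an assumed,
+    # configured CloudTableClient and the table and property names are placeholders.
+    #
+    #   entities = table_client.query_entities('tasktable',
+    #                                           "PartitionKey eq 'p1'",
+    #                                           'RowKey,Description')
+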
+ def insert_entity(self, table_name, entity, content_type='application/atom+xml'):
+ '''
+ Inserts a new entity into a table.
+
+ entity: Required. The entity object to insert. Could be a dict format or entity object.
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('feed:entity', entity)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ request = _Request()
+ request.method = 'POST'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + ''
+ request.header = [('Content-Type', to_right_type(content_type))]
+ request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
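+    # Illustrative insert (sketch, not part of the original change): an entity given in dict
+    # form carries its PartitionKey and RowKey alongside its properties, assuming
+    # convert_entity_to_xml accepts a dict keyed by property name; 'table_client' is an
+    # assumed, configured CloudTableClient.
+    #
+    #   table_client.insert_entity('tasktable',
+    #                              {'PartitionKey': 'p1', 'RowKey': '42', 'Description': 'sample'})
+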
+ def update_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
+ '''
+ Updates an existing entity in a table. The Update Entity operation replaces the entire
+ entity and can be used to remove properties.
+
+ entity: Required. The entity object to insert. Could be a dict format or entity object.
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('feed:entity', entity)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')'
+ request.header = [
+ ('Content-Type', to_right_type(content_type)),
+ ('If-Match', to_right_type(if_match))
+ ]
+ request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
+ '''
+ Updates an existing entity by updating the entity's properties. This operation does
+ not replace the existing entity, as the Update Entity operation does.
+
+ entity: Required. The entity object to insert. Could be a dict format or entity object.
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('feed:entity', entity)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ request = _Request()
+ request.method = 'MERGE'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')'
+ request.header = [
+ ('Content-Type', to_right_type(content_type)),
+ ('If-Match', to_right_type(if_match))
+ ]
+ request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def delete_entity(self, table_name, partition_key, row_key, content_type='application/atom+xml', if_match='*'):
+ '''
+ Deletes an existing entity in a table.
+
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ if_match: Required. Specifies the condition for which the delete should be performed.
+ To force an unconditional delete, set If-Match to the wildcard character (*).
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ validate_not_none('If-Match', if_match)
+ request = _Request()
+ request.method = 'DELETE'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')'
+ request.header = [
+ ('Content-Type', to_right_type(content_type)),
+ ('If-Match', to_right_type(if_match))
+ ]
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
+ '''
+ Replaces an existing entity or inserts a new entity if it does not exist in the table.
+ Because this operation can insert or update an entity, it is also known as an "upsert"
+ operation.
+
+ entity: Required. The entity object to insert. Could be a dict format or entity object.
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('feed:entity', entity)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ request = _Request()
+ request.method = 'PUT'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')'
+ request.header = [
+ ('Content-Type', to_right_type(content_type)),
+ ('If-Match', to_right_type(if_match))
+ ]
+ request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+ def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
+ '''
+ Merges an existing entity or inserts a new entity if it does not exist in the table.
+ Because this operation can insert or update an entity, it is also known as an "upsert"
+        operation.
+
+ entity: Required. The entity object to insert. Could be a dict format or entity object.
+ partition_key: PartitionKey of the entity.
+ row_key: RowKey of the entity.
+ Content-Type: this is required and has to be set to application/atom+xml
+ '''
+ validate_not_none('table-name', table_name)
+ validate_not_none('partition-key', partition_key)
+ validate_not_none('row-key', row_key)
+ validate_not_none('feed:entity', entity)
+ validate_not_none('Content-Type', content_type)
+ validate_values('Content-Type', to_right_type(content_type), 'application/atom+xml|')
+ request = _Request()
+ request.method = 'MERGE'
+ request.host = get_host(TABLE_SERVICE, self.account_name, self.use_local_storage)
+ request.uri = '/' + to_right_type(table_name) + '(PartitionKey=\'' + to_right_type(partition_key) + '\',RowKey=\'' + to_right_type(row_key) + '\')'
+ request.header = [
+ ('Content-Type', to_right_type(content_type)),
+ ('If-Match', to_right_type(if_match))
+ ]
+ request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.uri, request.query = _update_request_uri_query(request, self.use_local_storage)
+ request.header = _update_storage_table_header(request, self.account_name, self.account_key)
+ respbody = self._perform_request(request)
+
+
diff --git a/src/windowsazure/storage/sharedaccesssignature.py b/src/windowsazure/storage/sharedaccesssignature.py
new file mode 100644
index 000000000000..08b15e757219
--- /dev/null
+++ b/src/windowsazure/storage/sharedaccesssignature.py
@@ -0,0 +1,127 @@
+import base64
+import hmac
+import hashlib
+
+SIGNED_START = 'st'
+SIGNED_EXPIRY = 'se'
+SIGNED_RESOURCE = 'sr'
+SIGNED_PERMISSION = 'sp'
+SIGNED_IDENTIFIER = 'si'
+SIGNED_SIGNATURE = 'sig'
+RESOURCE_BLOB = 'blob'
+RESOURCE_CONTAINER = 'container'
+SIGNED_RESOURCE_TYPE = 'resource'
+SHARED_ACCESS_PERMISSION = 'permission'
+
+class WebResource:
+    def __init__(self, path=None, request_url=None, properties=None):
+        self.path = path
+        self.properties = properties if properties is not None else {}
+        self.request_url = request_url
+
+class Permission:
+ def __init__(self, path=None, query_string=None):
+ self.path = path
+ self.query_string = query_string
+
+class SharedAccessPolicy:
+ def __init__(self, access_policy, signed_identifier=None):
+ self.id = signed_identifier
+ self.access_policy = access_policy
+
+class SharedAccessSignature:
+ def __init__(self, account_name, account_key, permission_set=None):
+ self.account_name = account_name
+ self.account_key = account_key
+ self.permission_set = permission_set
+
+ def generate_signed_query_string(self, path, resource_type, shared_access_policy):
+ query_string = {}
+ if shared_access_policy.access_policy.start:
+ query_string[SIGNED_START] = shared_access_policy.access_policy.start
+
+ query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry
+ query_string[SIGNED_RESOURCE] = resource_type
+ query_string[SIGNED_PERMISSION] = shared_access_policy.access_policy.permission
+
+ if shared_access_policy.id:
+ query_string[SIGNED_IDENTIFIER] = shared_access_policy.id
+
+ query_string[SIGNED_SIGNATURE] = self._generate_signature(path, resource_type, shared_access_policy)
+ return query_string
+
+ def sign_request(self, web_resource):
+ if self.permission_set:
+ for shared_access_signature in self.permission_set:
+ if self._permission_matches_request(shared_access_signature, web_resource,
+ web_resource.properties[SIGNED_RESOURCE_TYPE],
+ web_resource.properties[SHARED_ACCESS_PERMISSION]):
+ if web_resource.request_url.find('?') == -1:
+ web_resource.request_url += '?'
+ else:
+ web_resource.request_url += '&'
+
+ web_resource.request_url += self._convert_query_string(shared_access_signature.query_string)
+ break
+ return web_resource
+
+ def _convert_query_string(self, query_string):
+ convert_str = ''
+ if query_string.has_key(SIGNED_START):
+ convert_str += SIGNED_START + '=' + query_string[SIGNED_START] + '&'
+ convert_str += SIGNED_EXPIRY + '=' + query_string[SIGNED_EXPIRY] + '&'
+ convert_str += SIGNED_PERMISSION + '=' + query_string[SIGNED_PERMISSION] + '&'
+ convert_str += SIGNED_RESOURCE_TYPE + '=' + query_string[SIGNED_RESOURCE] + '&'
+
+ if query_string.has_key(SIGNED_IDENTIFIER):
+ convert_str += SIGNED_IDENTIFIER + '=' + query_string[SIGNED_IDENTIFIER] + '&'
+ convert_str += SIGNED_SIGNATURE + '=' + query_string[SIGNED_SIGNATURE] + '&'
+ return convert_str
+
+ def _generate_signature(self, path, resource_type, shared_access_policy):
+
+ def get_value_to_append(value, no_new_line=False):
+ return_value = ''
+ if value:
+ return_value = value
+ if not no_new_line:
+ return_value += '\n'
+ return return_value
+
+ if path[0] != '/':
+ path = '/' + path
+
+ canonicalized_resource = '/' + self.account_name + path
+ string_to_sign = (get_value_to_append(shared_access_policy.access_policy.permission) +
+ get_value_to_append(shared_access_policy.access_policy.start) +
+ get_value_to_append(shared_access_policy.access_policy.expiry) +
+ get_value_to_append(canonicalized_resource) +
+ get_value_to_append(shared_access_policy.id, True))
+
+ return self._sign(string_to_sign)
+
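+ # For reference, the string-to-sign assembled above is newline-separated in
+ # this order: signedpermissions, signedstart, signedexpiry,
+ # canonicalizedresource ('/<account name><path>'), signedidentifier.
+ # Fields that are empty are omitted entirely by get_value_to_append (no
+ # placeholder newline is emitted for them).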
+ def _permission_matches_request(self, shared_access_signature, web_resource, resource_type, required_permission):
+ required_resource_type = resource_type
+ if required_resource_type == RESOURCE_BLOB:
+ required_resource_type += RESOURCE_CONTAINER
+
+ for name, value in shared_access_signature.query_string.iteritems():
+ if name == SIGNED_RESOURCE and required_resource_type.find(value) == -1:
+ return False
+ elif name == SIGNED_PERMISSION and required_permission.find(value) == -1:
+ return False
+
+ return web_resource.path.find(shared_access_signature.path) != -1
+
+ def _sign(self, string_to_sign):
+ decode_account_key = base64.b64decode(self.account_key)
+ signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
+ return base64.b64encode(signed_hmac_sha256.digest())
+
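+# A minimal usage sketch (the policy object below is an assumption for
+# illustration; it only needs permission/start/expiry attributes plus an
+# optional identifier, as read by the code above):
+#
+#   sas = SharedAccessSignature('myaccount', 'bXlrZXk=')
+#   policy = SharedAccessPolicy(access_policy)  # access_policy built elsewhere
+#   qs = sas.generate_signed_query_string('mycontainer/myblob.txt',
+#                                         RESOURCE_BLOB, policy)
+#
+# The returned dict maps the short query keys (st, se, sp, sr, si, sig) to
+# their values and can be serialized with _convert_query_string.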
+
+
+
+
+
+
+
diff --git a/src/windowsazure/storage/storageclient.py b/src/windowsazure/storage/storageclient.py
new file mode 100644
index 000000000000..f1bee24e395b
--- /dev/null
+++ b/src/windowsazure/storage/storageclient.py
@@ -0,0 +1,135 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+import base64
+import urllib2
+import hmac
+import hashlib
+import os
+
+from windowsazure.storage import (_storage_error_handler, X_MS_VERSION,
+ SignedIdentifier, AccessPolicy) # used by generate_share_access_string
+from windowsazure.http.httpclient import _HTTPClient
+from windowsazure import (_parse_response, HTTPError, WindowsAzureError,
+ DEV_ACCOUNT_NAME, DEV_ACCOUNT_KEY)
+
+AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'
+AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'
+EMULATED = 'EMULATED'
+
+class _StorageClient:
+ '''
+ This is the base class for CloudBlobClient, CloudTableClient and CloudQueueClient.
+ '''
+
+ def __init__(self, account_name=None, account_key=None, protocol='http'):
+ self.account_name = account_name
+ self.account_key = account_key
+ self.status = None
+ self.message = None
+ self.respheader = None
+ self.requestid = None
+ self.protocol = protocol
+ self.use_local_storage = False
+ if os.environ.has_key(EMULATED):
+ if os.environ[EMULATED].lower() == 'false':
+ self.is_emulated = False
+ else:
+ self.is_emulated = True
+ else:
+ self.is_emulated = False
+
+ if not account_name or not account_key:
+ if self.is_emulated:
+ self.account_name = DEV_ACCOUNT_NAME
+ self.account_key = DEV_ACCOUNT_KEY
+ self.use_local_storage = True
+ else:
+ if os.environ.has_key(AZURE_STORAGE_ACCOUNT):
+ self.account_name = os.environ[AZURE_STORAGE_ACCOUNT]
+ if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
+ self.account_key = os.environ[AZURE_STORAGE_ACCESS_KEY]
+ else:
+ self.account_name = account_name
+ self.account_key = account_key
+
+ if not self.account_name or not self.account_key:
+ raise WindowsAzureError('You need to provide both account name and access key')
+
+ self.x_ms_version = X_MS_VERSION
+ self._httpclient = _HTTPClient(service_instance=self, account_key=account_key, account_name=account_name, x_ms_version=self.x_ms_version, protocol=protocol)
+ self._batchclient = None
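+
+ # Credential resolution above, in order: explicit account_name/account_key
+ # (both must be supplied); otherwise EMULATED selects the development storage
+ # account; otherwise the AZURE_STORAGE_ACCOUNT / AZURE_STORAGE_ACCESS_KEY
+ # environment variables are consulted. A minimal sketch of the
+ # environment-variable path (values are placeholders):
+ #
+ #   import os
+ #   os.environ[AZURE_STORAGE_ACCOUNT] = 'myaccount'
+ #   os.environ[AZURE_STORAGE_ACCESS_KEY] = 'bXlrZXk='
+ #   client = CloudBlobClient()  # assuming the subclass forwards to this __init__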
+
+ def _perform_request(self, request):
+ try:
+ if self._batchclient is not None:
+ return self._batchclient.insert_request_to_batch(request)
+ else:
+ resp = self._httpclient.perform_request(request)
+ self.status = self._httpclient.status
+ self.message = self._httpclient.message
+ self.respheader = self._httpclient.respheader
+ except HTTPError as e:
+ self.status = self._httpclient.status
+ self.message = self._httpclient.message
+ self.respheader = self._httpclient.respheader
+ _storage_error_handler(e)
+
+ if not resp:
+ return None
+ return resp
+
+ def _parse_response(self, response, return_type=None):
+ return _parse_response(response, return_type)
+
+ def generate_share_access_string(self, container_name, blob_name, share_access_policy):
+ resource = ''
+ if container_name:
+ resource += container_name + '/'
+ if blob_name:
+ resource += blob_name
+ signed_identifier = ''
+ access_policy = None
+ string_to_sign = ''
+ if isinstance(share_access_policy, SignedIdentifier):
+ access_policy = share_access_policy.access_policy
+ signed_identifier = share_access_policy.id
+ elif isinstance(share_access_policy, AccessPolicy):
+ access_policy = share_access_policy
+ else:
+ raise ValueError('Access Policy Error', 'share_access_policy must be either a SignedIdentifier or an AccessPolicy instance')
+
+ string_to_sign += access_policy.permission + '\n'
+ string_to_sign += access_policy.start + '\n'
+ string_to_sign += access_policy.expiry + '\n'
+ string_to_sign += '/' + self.account_name + urllib2.quote(resource) + '\n'
+ string_to_sign += signed_identifier
+
+ #sign the request
+ decode_account_key = base64.b64decode(self.account_key)
+ signed_hmac_sha256 = hmac.HMAC(decode_account_key, string_to_sign, hashlib.sha256)
+
+ share_access_string = 'st=' + access_policy.start + '&'
+ share_access_string += 'se=' + access_policy.expiry + '&'
+ share_access_string += 'sp=' + access_policy.permission + '&'
+ if not blob_name:
+ share_access_string += 'sr=c&'
+ share_access_string += signed_identifier + '&'
+ else:
+ share_access_string += 'sr=b&'
+ share_access_string += base64.b64encode(signed_hmac_sha256.digest())
+
+ return share_access_string
+
+
+
+
+
diff --git a/test/run.bash b/test/run.bash
new file mode 100644
index 000000000000..278c383fba82
--- /dev/null
+++ b/test/run.bash
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export PYTHONPATH=$PYTHONPATH:../src
+
+echo "Running tests..."
+python -m unittest discover -p "test_*.py"
diff --git a/test/run.bat b/test/run.bat
new file mode 100644
index 000000000000..4a39f9b8f911
--- /dev/null
+++ b/test/run.bat
@@ -0,0 +1,52 @@
+@echo OFF
+REM----------------------------------------------------------------------------
+REM Copyright (c) Microsoft Corporation.
+REM
+REM This source code is subject to terms and conditions of the Apache License,
+REM Version 2.0. A copy of the license can be found in the License.html file at
+REM the root of this distribution. If you cannot locate the Apache License,
+REM Version 2.0, please send an email to vspython@microsoft.com. By using this
+REM source code in any fashion, you are agreeing to be bound by the terms of the
+REM Apache License, Version 2.0.
+REM
+REM You must not remove this notice, or any other, from this software.
+REM----------------------------------------------------------------------------
+cls
+
+if "%PYTHONPATH%" == "" (
+ set PYTHONPATH=..\src
+) else (
+ set PYTHONPATH=%PYTHONPATH%;..\src
+)
+
+echo Running tests...
+%SystemDrive%\Python27\python.exe -m unittest discover -p "test_*.py"
+set UNITTEST_EC=%ERRORLEVEL%
+echo Finished running tests!
+
+if exist "%SystemDrive%\Python27\Scripts\coverage.exe" (
+ goto :coverage
+)
+
+
+REM ---------------------------------------------------------------------------
+if not exist "%SystemDrive%\Python27\Scripts\pip.exe" (
+ echo Cannot do a code coverage run when neither 'coverage' nor 'pip' is installed.
+ goto :exit_door
+)
+
+echo Installing 'coverage' package...
+%SystemDrive%\Python27\Scripts\pip.exe install coverage==3.5.2
+echo Finished installing 'coverage' package
+
+REM ---------------------------------------------------------------------------
+:coverage
+echo Starting coverage run...
+%SystemDrive%\Python27\Scripts\coverage.exe run -m unittest discover -p "test_*.py"
+%SystemDrive%\Python27\Scripts\coverage.exe html
+start %CD%\htmlcov\index.html
+echo Finished coverage run!
+
+REM ---------------------------------------------------------------------------
+:exit_door
+exit /B %UNITTEST_EC%
\ No newline at end of file
diff --git a/test/windowsazuretest.pyproj b/test/windowsazuretest.pyproj
new file mode 100644
index 000000000000..9993016dd0cd
--- /dev/null
+++ b/test/windowsazuretest.pyproj
@@ -0,0 +1,48 @@
+
+
+
+ Debug
+ 2.0
+ {c0742a2d-4862-40e4-8a28-036eecdbc614}
+ .
+
+
+ ..\src
+ .
+ .
+ windowsazuretest
+ windowsazuretest
+
+
+
+
+
+
+
+
+
+
+ true
+ false
+
+
+ true
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/test/windowsazuretest.pyproj.vspscc b/test/windowsazuretest.pyproj.vspscc
new file mode 100644
index 000000000000..b6d32892fd64
--- /dev/null
+++ b/test/windowsazuretest.pyproj.vspscc
@@ -0,0 +1,10 @@
+""
+{
+"FILE_VERSION" = "9237"
+"ENLISTMENT_CHOICE" = "NEVER"
+"PROJECT_FILE_RELATIVE_PATH" = ""
+"NUMBER_OF_EXCLUDED_FILES" = "0"
+"ORIGINAL_PROJECT_FILE_PATH" = ""
+"NUMBER_OF_NESTED_PROJECTS" = "0"
+"SOURCE_CONTROL_SETTINGS_PROVIDER" = "PROVIDER"
+}
diff --git a/test/windowsazuretest/__init__.py b/test/windowsazuretest/__init__.py
new file mode 100644
index 000000000000..289ef7652546
--- /dev/null
+++ b/test/windowsazuretest/__init__.py
@@ -0,0 +1,12 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
diff --git a/test/windowsazuretest/test_cloudblobclient.py b/test/windowsazuretest/test_cloudblobclient.py
new file mode 100644
index 000000000000..5d7b7617178c
--- /dev/null
+++ b/test/windowsazuretest/test_cloudblobclient.py
@@ -0,0 +1,381 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+from windowsazure.storage.cloudblobclient import *
+from windowsazure.storage import Metrics, BlockList
+from windowsazure import WindowsAzureError
+
+from windowsazuretest.util import (credentials,
+ getUniqueTestRunID,
+ STATUS_OK,
+ STATUS_CREATED,
+ STATUS_ACCEPTED,
+ DEFAULT_SLEEP_TIME,
+ DEFAULT_LEASE_TIME)
+
+import unittest
+import time
+
+#------------------------------------------------------------------------------
+__uid = getUniqueTestRunID()
+
+CONTAINER_TO_DELETE = 'containertodelete%s' % (__uid)
+CONTAINER_NO_DELETE = 'containernodelete%s' % (__uid)
+BLOB_TO_DELETE = 'blobtodelete%s' % (__uid)
+BLOB_NO_DELETE = 'blobnodelete%s' % (__uid)
+BLOCK_BLOB = 'blockblob%s' % (__uid)
+PAGE_BLOB = 'mytestpageblob%s' % (__uid)
+COPY_BLOB = 'mytestblobcopy%s' % (__uid)
+
+#------------------------------------------------------------------------------
+class CloudBlobClientTest(unittest.TestCase):
+
+ def setUp(self):
+ self.bc = CloudBlobClient(account_name=credentials.getStorageServicesName(),
+ account_key=credentials.getStorageServicesKey())
+ self.cleanup()
+ time.sleep(DEFAULT_SLEEP_TIME)
+
+ def tearDown(self):
+ self.cleanup()
+ return super(CloudBlobClientTest, self).tearDown()
+
+ def cleanup(self):
+ for cont in [CONTAINER_NO_DELETE, CONTAINER_TO_DELETE]:
+ for blob in [BLOB_NO_DELETE, BLOB_TO_DELETE]:
+ try:
+ self.bc.delete_blob(cont, blob)
+ except: pass
+
+ try:
+ self.bc.delete_container(cont)
+ except: pass
+
+ def test_sanity(self):
+ self.sanity_create_container()
+ self.sanity_list_containers()
+ self.sanity_get_container_properties()
+ self.sanity_get_container_acl()
+ self.sanity_set_container_acl()
+ self.sanity_get_container_metadata()
+ self.sanity_set_container_metadata()
+ self.sanity_delete_container()
+
+ self.sanity_put_blob()
+ self.sanity_get_blob()
+ self.sanity_get_blob_properties()
+ self.sanity_set_blob_properties()
+ self.sanity_get_blob_metadata()
+ self.sanity_set_blob_metadata()
+ self.sanity_lease_blob()
+ self.sanity_snapshot_blob()
+ self.sanity_copy_blob()
+ self.sanity_list_blobs()
+ self.sanity_delete_blob()
+ self.sanity_put_block()
+ self.sanity_put_block_list()
+ self.sanity_get_block_list()
+ self.sanity_put_page()
+ self.sanity_get_page_ranges()
+
+ #--Helpers-----------------------------------------------------------------
+ # container tests
+ def sanity_create_container(self):
+ resp = self.bc.create_container(CONTAINER_NO_DELETE)
+ self.assertTrue(resp)
+ resp = self.bc.create_container(CONTAINER_TO_DELETE)
+ self.assertTrue(resp)
+
+ def list_containers_helper(self):
+ containers = self.bc.list_containers()
+ containers2 = [x for x in containers] #check __iter__
+ containers = [x for x in containers.containers]
+ self.assertItemsEqual(containers, containers2)
+
+ tmpDict = {}
+ for x in containers:
+ if not tmpDict.has_key(x.name):
+ tmpDict[x.name] = 0
+ tmpDict[x.name] = tmpDict[x.name] + 1
+ return tmpDict
+
+ def sanity_list_containers(self):
+ tmpDict = self.list_containers_helper()
+
+ for x in [CONTAINER_NO_DELETE, CONTAINER_TO_DELETE]:
+ self.assertIn(x, tmpDict.keys())
+ self.assertEqual(tmpDict[x], 1)
+
+ def list_blobs_helper(self, contName):
+ blobs = self.bc.list_blobs(contName)
+ blobs2 = [x for x in blobs] #check __iter__
+ blobs = [x for x in blobs.blobs]
+ self.assertItemsEqual(blobs, blobs2)
+
+ tmpDict = {}
+ for x in blobs:
+ if not tmpDict.has_key(x.name):
+ tmpDict[x.name] = 0
+ tmpDict[x.name] = tmpDict[x.name] + 1
+ return tmpDict
+
+ def sanity_list_blobs(self):
+ tmpDict = self.list_blobs_helper(CONTAINER_NO_DELETE)
+ for x in [PAGE_BLOB, BLOCK_BLOB,
+ BLOB_NO_DELETE, BLOB_TO_DELETE,
+ COPY_BLOB]:
+ self.assertIn(x, tmpDict.keys())
+ self.assertEqual(tmpDict[x], 1)
+
+ def sanity_get_container_properties(self):
+ container_properties = self.bc.get_container_properties(CONTAINER_NO_DELETE)
+
+ def sanity_get_container_acl(self):
+ container_acl = self.bc.get_container_acl(CONTAINER_NO_DELETE)
+ self.assertEqual(len(container_acl.signed_identifiers),
+ 0)
+
+ def sanity_set_container_acl(self):
+ container_acl = self.bc.get_container_acl(CONTAINER_NO_DELETE)
+ resp = self.bc.set_container_acl(CONTAINER_NO_DELETE, container_acl)
+ self.assertEqual(resp,
+ None)
+
+ #What we get back here should be equivalent to the original
+ container_acl2 = self.bc.get_container_acl(CONTAINER_NO_DELETE)
+ self.assertEquals(container_acl.signed_identifiers,
+ container_acl2.signed_identifiers)
+
+ def sanity_get_container_metadata(self):
+ resp = self.bc.get_container_metadata(CONTAINER_NO_DELETE)
+ # TODO: verify result
+
+ def sanity_set_container_metadata(self):
+ pass
+ # TODO: verify this, behavior related to trimming of names appears to have changed
+ #md = self.bc.get_container_metadata(CONTAINER_NO_DELETE)
+ #self.assertFalse(hasattr(md, "x_ms_meta_a"))
+ #resp = self.bc.set_container_metadata(CONTAINER_NO_DELETE, {'a' : 'bcbbd'})
+ #self.assertEqual(resp,
+ # None)
+ #md = self.bc.get_container_metadata(CONTAINER_NO_DELETE)
+ #self.assertEqual(md.x_ms_meta_a,
+ # u'bcbbd')
+
+ def sanity_delete_container(self):
+ resp = self.bc.delete_container(CONTAINER_TO_DELETE)
+ self.assertTrue(resp)
+
+ #Verify it was actually removed
+ tmpDict = self.list_containers_helper()
+
+ self.assertNotIn(CONTAINER_TO_DELETE, tmpDict.keys())
+ self.assertEqual(tmpDict[CONTAINER_NO_DELETE], 1)
+
+ #blob tests
+ def sanity_put_blob(self):
+ resp = self.bc.put_blob(CONTAINER_NO_DELETE,
+ BLOB_TO_DELETE,
+ 'This blob gets deleted',
+ x_ms_blob_type='BlockBlob')
+ self.assertEqual(resp, None)
+ #self.assertEqual(resp.content_m_d5, u'tdVPvWDrISWkirBY9i0FSQ==')
+
+ resp = self.bc.put_blob(CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE,
+ 'This is blob not deleted',
+ x_ms_blob_type='BlockBlob')
+ self.assertEqual(resp, None)
+ #self.assertEqual(resp.content_m_d5, u'HZfRAUjvPvOegAWlLDwLTQ==')
+
+ resp = self.bc.put_blob(CONTAINER_NO_DELETE,
+ BLOCK_BLOB,
+ 'This is block blob',
+ x_ms_blob_type='BlockBlob')
+ self.assertEqual(resp, None)
+ #self.assertEqual(resp.content_m_d5, u'6Eqt0OcuyhknAwC87yMtNA==')
+
+ resp = self.bc.put_blob(CONTAINER_NO_DELETE,
+ PAGE_BLOB,
+ '',
+ x_ms_blob_type='PageBlob',
+ x_ms_blob_content_length='1024')
+ self.assertEqual(resp, None)
+ #self.assertFalse(hasattr(resp, "content_m_d5"))
+
+ def sanity_get_blob(self):
+ resp = self.bc.get_blob(CONTAINER_NO_DELETE, BLOB_NO_DELETE)
+ self.assertEqual(resp, 'This is blob not deleted')
+ self.assertEqual(type(resp), str)
+
+ def sanity_get_blob_properties(self):
+ resp = self.bc.get_blob_properties()
+ self.assertIsInstance(resp.logging, Logging)
+ self.assertIsInstance(resp.metrics, Metrics)
+
+ def sanity_set_blob_properties(self):
+ blob_properties = self.bc.get_blob_properties()
+
+ self.assertEquals(blob_properties.logging.retention_policy.enabled,
+ False)
+ blob_properties.logging.retention_policy.enabled=False
+
+ self.assertEquals(blob_properties.metrics.enabled,
+ True)
+ blob_properties.metrics.enabled=True
+
+ self.assertEquals(blob_properties.metrics.retention_policy.enabled,
+ False)
+ blob_properties.metrics.retention_policy.enabled=False
+
+ resp = self.bc.set_blob_properties(blob_properties)
+ self.assertEquals(resp, None)
+
+ blob_properties2 = self.bc.get_blob_properties()
+ self.assertEquals(blob_properties2.logging.retention_policy.enabled,
+ False)
+ self.assertEquals(blob_properties2.metrics.enabled,
+ True)
+ self.assertEquals(blob_properties2.metrics.retention_policy.enabled,
+ False)
+
+ def sanity_get_blob_metadata(self):
+ resp = self.bc.get_blob_metadata(CONTAINER_NO_DELETE, BLOB_NO_DELETE)
+ # TODO: verify result
+
+ def sanity_set_blob_metadata(self):
+ pass
+ # TODO: verify this, behavior related to trimming of names appears to have changed
+ #resp = self.bc.set_blob_metadata(CONTAINER_NO_DELETE,
+ # BLOB_NO_DELETE,
+ # {'set_blob_metadata':'test1'})
+ #self.assertEquals(resp, None)
+
+ #resp = self.bc.get_blob_metadata(CONTAINER_NO_DELETE, BLOB_NO_DELETE)
+ #self.assertEquals(resp['x_ms_meta_set_blob_metadata'], u'test1')
+
+ def sanity_lease_blob(self):
+ resp = self.bc.lease_blob(CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE,
+ x_ms_lease_action='acquire')
+ # TODO: verify result
+
+ #The lease has a lifespan of a minute
+ self.assertRaises(WindowsAzureError,
+ #TODO - WindowsAzureError doesn't override __str__ ?
+ #"There is already a lease present",
+ lambda: self.bc.lease_blob(CONTAINER_NO_DELETE, BLOB_NO_DELETE, x_ms_lease_action='acquire'))
+ time.sleep(DEFAULT_LEASE_TIME)
+
+ resp = self.bc.lease_blob(CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE,
+ x_ms_lease_action='acquire')
+ # TODO: verify result
+
+ #TODO - file a bug
+ if True:
+ time.sleep(DEFAULT_LEASE_TIME)
+ else:
+ resp = self.bc.lease_blob(CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE,
+ x_ms_lease_action='release')
+ # TODO: verify result
+
+ def sanity_snapshot_blob(self):
+ resp = self.bc.snapshot_blob(CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE)
+ self.assertEquals(resp,
+ None)
+ #self.assertTrue(hasattr(resp, "x_ms_snapshot"))
+
+ def sanity_copy_blob(self):
+ newBlobName = COPY_BLOB
+ sourceblob = '/%s/%s/%s' % (credentials.getStorageServicesName(),
+ CONTAINER_NO_DELETE,
+ BLOB_NO_DELETE)
+ resp = self.bc.copy_blob(CONTAINER_NO_DELETE,
+ newBlobName,
+ x_ms_copy_source=sourceblob)
+ self.assertEquals(resp, None)
+
+ resp = self.bc.get_blob(CONTAINER_NO_DELETE, newBlobName)
+ self.assertEqual(resp, 'This is blob not deleted')
+
+ def sanity_delete_blob(self):
+ resp = self.bc.delete_blob(CONTAINER_NO_DELETE, BLOB_TO_DELETE)
+ self.assertEquals(resp, None)
+
+ self.assertRaises(WindowsAzureError,
+ lambda: self.bc.delete_blob(CONTAINER_NO_DELETE, BLOB_TO_DELETE))
+
+ def sanity_put_block(self):
+ md5Dict = {0: u'TjjhPkKeLS6Els52i6m9Bg==',
+ 1: u'ZOnmAD+J5F2p66g8NFSefA==',
+ 2: u'giBgEwOK96+T6eqweyrlNg==',
+ 3: u'FDhv5/Vy34Z9KKvEnjH2lQ==',
+ 4: u'jkC3Z8KTocewrRQF+tkxeA=='}
+
+ for i in xrange(5):
+ resp = self.bc.put_block(CONTAINER_NO_DELETE,
+ BLOB_TO_DELETE,
+ 'block %d' % (i),
+ str(i))
+ self.assertEquals(resp, None)
+ #self.assertEquals(resp.content_m_d5, md5Dict[i])
+
+ def sanity_put_block_list(self):
+ resp = self.bc.get_block_list(CONTAINER_NO_DELETE, BLOB_TO_DELETE)
+ self.assertItemsEqual(resp.committed_blocks,
+ [])
+ self.assertItemsEqual(resp.uncommitted_blocks,
+ [])
+
+ bl = BlockList()
+ bl.latest += [str(x) for x in range(4)]
+ resp = self.bc.put_block_list(CONTAINER_NO_DELETE, BLOB_TO_DELETE, bl)
+ self.assertEquals(resp, None)
+
+ def sanity_get_block_list(self):
+ resp = self.bc.get_block_list(CONTAINER_NO_DELETE, BLOB_TO_DELETE)
+ self.assertItemsEqual([x.id for x in resp.committed_blocks],
+ [str(x) for x in range(4)])
+ #TODO - bug?
+ #self.assertItemsEqual([x.id for x in resp.uncommitted_blocks],
+ # ["4"])
+
+ def sanity_put_page(self):
+ tmpBlobName = 'mytestpageblob1'
+ resp = self.bc.put_blob(CONTAINER_NO_DELETE,
+ tmpBlobName,
+ '',
+ x_ms_blob_type='PageBlob',
+ x_ms_blob_content_length='1024')
+ self.assertEquals(resp, None)
+
+ resp = self.bc.put_page(CONTAINER_NO_DELETE,
+ tmpBlobName,
+ page='',
+ x_ms_range='bytes=0-511',
+ x_ms_page_write='clear')
+ self.assertEquals(resp, None)
+ #self.assertEquals(resp.x_ms_blob_sequence_number, u'0')
+
+ resp = self.bc.get_page_ranges(CONTAINER_NO_DELETE, tmpBlobName)
+ self.assertEquals(len(resp.page_ranges), 0)
+
+ def sanity_get_page_ranges(self):
+ self.bc.get_page_ranges(CONTAINER_NO_DELETE, PAGE_BLOB)
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/windowsazuretest/test_cloudqueueclient.py b/test/windowsazuretest/test_cloudqueueclient.py
new file mode 100644
index 000000000000..151ddce52054
--- /dev/null
+++ b/test/windowsazuretest/test_cloudqueueclient.py
@@ -0,0 +1,117 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+
+from windowsazure.storage.cloudqueueclient import *
+
+from windowsazuretest.util import credentials, getUniqueTestRunID
+
+import unittest
+import time
+
+#------------------------------------------------------------------------------
+__uid = getUniqueTestRunID()
+
+QUEUE_NO_DELETE = 'mytestqueuenodelete%s' % (__uid)
+QUEUE_TO_DELETE = 'mytestqueuetodelete%s' % (__uid)
+
+#------------------------------------------------------------------------------
+class StorageTest(unittest.TestCase):
+
+ def setUp(self):
+ self.queue_client = CloudQueueClient(account_name=credentials.getStorageServicesName(),
+ account_key=credentials.getStorageServicesKey())
+
+ self.cleanup()
+ time.sleep(10)
+
+ def tearDown(self):
+ self.cleanup()
+ return super(StorageTest, self).tearDown()
+
+ def cleanup(self):
+ try: self.queue_client.delete_queue(QUEUE_NO_DELETE)
+ except: pass
+ try: self.queue_client.delete_queue(QUEUE_TO_DELETE)
+ except: pass
+
+ def test_queue_service(self):
+ self.create_queue()
+ self.list_queues()
+ self.get_queue_service_properties()
+ self.set_queue_service_properties()
+ self.get_queue_metadata()
+ self.set_queue_metadata()
+ self.put_message()
+ self.peek_messages()
+ self.get_messages()
+ self.update_message()
+ self.delete_message()
+ self.clear_messages()
+
+ #--Helpers-----------------------------------------------------------------
+ #queue test helpers
+ def create_queue(self):
+ self.queue_client.create_queue(QUEUE_TO_DELETE)
+ self.queue_client.create_queue(QUEUE_NO_DELETE)
+
+ def list_queues(self):
+ self.queue_client.list_queues()
+
+ def delete_queue(self):
+ '''
+ TODO - this isn't called by anything
+ '''
+ self.queue_client.delete_queue(QUEUE_TO_DELETE)
+
+ def get_queue_service_properties(self):
+ self.queue_client.get_queue_service_properties()
+
+ def set_queue_service_properties(self):
+ queue_properties = self.queue_client.get_queue_service_properties()
+ queue_properties.logging.retention_policy.enabled=False
+ queue_properties.metrics.enabled=False
+ queue_properties.metrics.retention_policy.enabled=False
+ self.queue_client.set_queue_service_properties(queue_properties)
+
+ def get_queue_metadata(self):
+ self.queue_client.get_queue_metadata(QUEUE_NO_DELETE)
+
+ def set_queue_metadata(self):
+ self.queue_client.set_queue_metadata(QUEUE_NO_DELETE, {'category':'test'})
+
+ def put_message(self):
+ self.queue_client.put_message(QUEUE_NO_DELETE, 'This is a message')
+
+ def peek_messages(self):
+ self.queue_client.peek_messages(QUEUE_NO_DELETE)
+
+ def get_messages(self):
+ self.queue_client.get_messages(QUEUE_NO_DELETE)
+
+ def update_message(self):
+ #self.queue_client.update_message(queuenodelete, messageid, 'This is updated message', popreceipt, visibilitytimeout)
+ pass
+
+ def delete_message(self):
+ #self.queue_client.put_message(queuenodelete, 'This is message to delete')
+ #self.queue_client.get_messages(queuenodelete)
+ #self.queue_client.delete_message(queuenodelete, messageid, popreceipt)
+ pass
+
+ def clear_messages(self):
+ self.queue_client.clear_messages(QUEUE_NO_DELETE)
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/windowsazuretest/test_cloudtableclient.py b/test/windowsazuretest/test_cloudtableclient.py
new file mode 100644
index 000000000000..7213e9489f9d
--- /dev/null
+++ b/test/windowsazuretest/test_cloudtableclient.py
@@ -0,0 +1,281 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+from windowsazure.storage.cloudtableclient import *
+from windowsazure.storage import EntityProperty, Entity
+from windowsazure import WindowsAzureError
+
+
+from windowsazuretest.util import (credentials,
+ getUniqueTestRunID,
+ STATUS_OK,
+ STATUS_CREATED,
+ STATUS_ACCEPTED,
+ STATUS_NO_CONTENT)
+
+import unittest
+import time
+from datetime import datetime
+
+#------------------------------------------------------------------------------
+__uid = getUniqueTestRunID()
+
+TABLE_TO_DELETE = 'mytesttabletodelete%s' % (__uid)
+TABLE_NO_DELETE = 'mytesttablenodelete%s' % (__uid)
+ENTITY_TO_DELETE = 'mytestentitytodelete%s' % (__uid)
+ENTITY_NO_DELETE = 'mytestentitynodelete%s' % (__uid)
+
+#------------------------------------------------------------------------------
+class StorageTest(unittest.TestCase):
+ '''
+ TODO:
+ - comprehensive, positive test cases for all table client methods
+ - comprehensive, negative test cases for all table client methods
+ - missing coverage for begin_batch
+ - missing coverage for cancel_batch
+ - missing coverage for commit_batch
+ - get_table_service_properties busted
+ - set_table_service_properties busted
+ '''
+
+ def setUp(self):
+ self.tc = CloudTableClient(account_name=credentials.getStorageServicesName(),
+ account_key=credentials.getStorageServicesKey())
+ self.cleanup()
+ time.sleep(10)
+
+ def tearDown(self):
+ self.cleanup()
+ return super(StorageTest, self).tearDown()
+
+ def cleanup(self):
+ for cont in [TABLE_NO_DELETE, TABLE_TO_DELETE]:
+ try: self.tc.delete_table(cont)
+ except: pass
+
+ def test_sanity(self):
+ self.sanity_create_table()
+ self.sanity_query_tables()
+
+ #TODO - this fails, but I want the code coverage
+ try: self.sanity_get_table_service_properties()
+ except: pass
+ try: self.sanity_set_table_service_properties()
+ except: pass
+
+ self.sanity_delete_table()
+
+ self.sanity_insert_entity()
+ self.sanity_get_entity()
+ self.sanity_query_entities()
+ self.sanity_update_entity()
+ self.sanity_insert_or_merge_entity()
+ self.sanity_insert_or_replace_entity()
+ self.sanity_merge_entity()
+ self.sanity_delete_entity()
+
+ self.sanity_begin_batch()
+ self.sanity_commit_batch()
+ self.sanity_cancel_batch()
+
+ #--Helpers-----------------------------------------------------------------
+ def sanity_create_table(self):
+ resp = self.tc.create_table(TABLE_TO_DELETE)
+ self.assertTrue(resp)
+ #self.assertEqual(resp.cache_control, u'no-cache')
+
+ resp = self.tc.create_table(TABLE_NO_DELETE)
+ self.assertTrue(resp)
+ #self.assertEqual(resp.cache_control, u'no-cache')
+
+ def sanity_query_tables(self):
+ resp = self.tc.query_tables()
+ self.assertEqual(type(resp), list)
+ tableNames = [x.name for x in resp]
+ self.assertGreaterEqual(len(tableNames), 2)
+ self.assertIn(TABLE_NO_DELETE, tableNames)
+ self.assertIn(TABLE_TO_DELETE, tableNames)
+
+ def sanity_delete_table(self):
+ resp = self.tc.delete_table(TABLE_TO_DELETE)
+ self.assertTrue(resp)
+
+ def sanity_get_table_service_properties(self):
+ #TODO - file a bug; add assertions!
+ resp = self.tc.get_table_service_properties()
+
+ def sanity_set_table_service_properties(self):
+ #TODO - file a bug; add assertions!
+ table_properties = self.tc.get_table_service_properties()
+ self.tc.set_table_service_properties(table_properties)
+
+ def sanity_insert_entity(self):
+ resp = self.tc.insert_entity(TABLE_NO_DELETE, {'PartitionKey':'Lastname',
+ 'RowKey':'Firstname',
+ 'age':39,
+ 'sex':'male',
+ 'birthday':datetime(1973,10,04)})
+ self.assertEquals(resp, None)
+
+ entity = Entity()
+ entity.PartitionKey = 'Lastname'
+ entity.RowKey = 'Firstname1'
+ entity.age = 39
+ entity.Birthday = EntityProperty('Edm.Int64', 20)
+
+ resp = self.tc.insert_entity(TABLE_NO_DELETE, entity)
+ self.assertEquals(resp, None)
+
+ def sanity_get_entity(self):
+ ln = u'Lastname'
+ fn1 = u'Firstname1'
+ resp = self.tc.get_entity(TABLE_NO_DELETE,
+ ln,
+ fn1,
+ '')
+ self.assertEquals(resp.PartitionKey, ln)
+ self.assertEquals(resp.RowKey, fn1)
+ self.assertEquals(resp.age.value, u'39')
+ self.assertEquals(resp.age.type, u'Edm.Int32')
+ self.assertEquals(resp.Birthday.value, u'20')
+ self.assertEquals(resp.Birthday.type, 'Edm.Int64')
+
+ def sanity_query_entities(self):
+ resp = self.tc.query_entities(TABLE_NO_DELETE, '', '')
+ self.assertEquals(len(resp), 2)
+ self.assertEquals(resp[0].birthday.value, u'1973-10-04T00:00:00Z')
+ self.assertEquals(resp[1].Birthday.value, u'20')
+
+ def sanity_update_entity(self):
+ ln = u'Lastname'
+ fn = u'Firstname'
+ resp = self.tc.update_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ {'PartitionKey':'Lastname',
+ 'RowKey':'Firstname',
+ 'age':21,
+ 'sex':'female',
+ 'birthday':datetime(1991,10,04)})
+ self.assertEquals(resp, None)
+
+ resp = self.tc.get_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ '')
+ self.assertEquals(resp.PartitionKey, ln)
+ self.assertEquals(resp.RowKey, fn)
+ self.assertEquals(resp.age.value, u'21')
+ self.assertEquals(resp.age.type, u'Edm.Int32')
+ self.assertEquals(resp.sex, u'female')
+ self.assertEquals(resp.birthday.value, u'1991-10-04T00:00:00Z')
+ self.assertEquals(resp.birthday.type, 'Edm.DateTime')
+
+ def sanity_insert_or_merge_entity(self):
+ ln = u'Lastname'
+ fn = u'Firstname'
+ resp = self.tc.insert_or_merge_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ {'PartitionKey':'Lastname',
+ 'RowKey':'Firstname',
+ 'age': u'abc', #changed type
+ 'sex':'male', #changed value
+ 'birthday':datetime(1991,10,04),
+ 'sign' : 'aquarius' #new
+ })
+ self.assertEquals(resp, None)
+
+ resp = self.tc.get_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ '')
+ self.assertEquals(resp.PartitionKey, ln)
+ self.assertEquals(resp.RowKey, fn)
+ self.assertEquals(resp.age, u'abc')
+ self.assertEquals(resp.sex, u'male')
+ self.assertEquals(resp.birthday.value, u'1991-10-04T00:00:00Z')
+ self.assertEquals(resp.birthday.type, 'Edm.DateTime')
+ self.assertEquals(resp.sign, u'aquarius')
+
+ def sanity_insert_or_replace_entity(self):
+ ln = u'Lastname'
+ fn = u'Firstname'
+ resp = self.tc.insert_or_replace_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ {'PartitionKey':'Lastname',
+ 'RowKey':'Firstname',
+ 'age':1,
+ 'sex':'male'})
+ self.assertEquals(resp, None)
+
+ resp = self.tc.get_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ '')
+ self.assertEquals(resp.PartitionKey, ln)
+ self.assertEquals(resp.RowKey, fn)
+ self.assertEquals(resp.age.value, u'1')
+ self.assertEquals(resp.sex, u'male')
+ self.assertFalse(hasattr(resp, "birthday"))
+ self.assertFalse(hasattr(resp, "sign"))
+
+ def sanity_merge_entity(self):
+ ln = u'Lastname'
+ fn = u'Firstname'
+ resp = self.tc.merge_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ {'PartitionKey':'Lastname',
+ 'RowKey':'Firstname',
+ 'sex':'female',
+ 'fact': 'nice person'})
+ self.assertEquals(resp, None)
+
+ resp = self.tc.get_entity(TABLE_NO_DELETE,
+ ln,
+ fn,
+ '')
+ self.assertEquals(resp.PartitionKey, ln)
+ self.assertEquals(resp.RowKey, fn)
+ self.assertEquals(resp.age.value, u'1')
+ self.assertEquals(resp.sex, u'female')
+ self.assertEquals(resp.fact, u'nice person')
+ self.assertFalse(hasattr(resp, "birthday"))
+
+ def sanity_delete_entity(self):
+ ln = u'Lastname'
+ fn = u'Firstname'
+ resp = self.tc.delete_entity(TABLE_NO_DELETE,
+ ln,
+ fn)
+ self.assertEquals(resp, None)
+
+ self.assertRaises(WindowsAzureError,
+ lambda: self.tc.get_entity(TABLE_NO_DELETE, ln, fn, ''))
+
+ def sanity_begin_batch(self):
+ resp = self.tc.begin_batch()
+ self.assertEquals(resp, None)
+
+ def sanity_commit_batch(self):
+ resp = self.tc.commit_batch()
+ self.assertEquals(resp, None)
+
+ def sanity_cancel_batch(self):
+ resp = self.tc.cancel_batch()
+ self.assertEquals(resp, None)
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/windowsazuretest/test_servicebusservice.py b/test/windowsazuretest/test_servicebusservice.py
new file mode 100644
index 000000000000..d1777a5d1ee9
--- /dev/null
+++ b/test/windowsazuretest/test_servicebusservice.py
@@ -0,0 +1,830 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+from windowsazure import *
+from windowsazure.servicebus import *
+from windowsazuretest.util import *
+
+import unittest
+
+#------------------------------------------------------------------------------
+class ServiceBusTest(unittest.TestCase):
+ def setUp(self):
+ self.sbs = ServiceBusService(credentials.getServiceBusNamespace(),
+ credentials.getServiceBusKey(),
+ 'owner')
+
+ # TODO: it may be overkill to use the machine name from
+ # getUniqueTestRunID, current time may be unique enough
+ __uid = getUniqueTestRunID()
+
+ queue_base_name = u'mytestqueue%s' % (__uid)
+ topic_base_name = u'mytesttopic%s' % (__uid)
+
+ self.queue_name = getUniqueNameBasedOnCurrentTime(queue_base_name)
+ self.topic_name = getUniqueNameBasedOnCurrentTime(topic_base_name)
+
+ def tearDown(self):
+ self.cleanup()
+ return super(ServiceBusTest, self).tearDown()
+
+ def cleanup(self):
+ try:
+ self.sbs.delete_queue(self.queue_name)
+ except: pass
+
+ try:
+ self.sbs.delete_topic(self.topic_name)
+ except: pass
+
+ #--Helpers-----------------------------------------------------------------
+
+ # TODO: move this function out of here so other tests can use it
+ # TODO: find out how to import/use safe_repr instead of repr
+ def assertNamedItemInContainer(self, container, item_name, msg=None):
+ for item in container:
+ if item.name == item_name:
+ return
+
+ standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ # TODO: move this function out of here so other tests can use it
+ # TODO: find out how to import/use safe_repr instead of repr
+ def assertNamedItemNotInContainer(self, container, item_name, msg=None):
+ for item in container:
+ if item.name == item_name:
+ standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def _create_queue(self, queue_name):
+ self.sbs.create_queue(queue_name, None, True)
+
+ def _create_queue_and_send_msg(self, queue_name, msg):
+ self._create_queue(queue_name)
+ self.sbs.send_queue_message(queue_name, msg)
+
+ def _create_topic(self, topic_name):
+ self.sbs.create_topic(topic_name, None, True)
+
+ def _create_topic_and_subscription(self, topic_name, subscription_name):
+ self._create_topic(topic_name)
+ self._create_subscription(topic_name, subscription_name)
+
+ def _create_subscription(self, topic_name, subscription_name):
+ self.sbs.create_subscription(topic_name, subscription_name, None, True)
+
+ #--Test cases for queues --------------------------------------------------
+ def test_create_queue_no_options(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_queue(self.queue_name)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_queue_no_options_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_queue(self.queue_name, None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_queue_with_options(self):
+ # Arrange
+
+ # Act
+ queue_options = Queue()
+ queue_options.max_size_in_megabytes = '5120'
+ queue_options.default_message_time_to_live = 'PT1M'
+ created = self.sbs.create_queue(self.queue_name, queue_options)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_queue_with_already_existing_queue(self):
+ # Arrange
+
+ # Act
+ created1 = self.sbs.create_queue(self.queue_name)
+ created2 = self.sbs.create_queue(self.queue_name)
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_queue_with_already_existing_queue_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_queue(self.queue_name)
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.create_queue(self.queue_name, None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_get_queue_with_existing_queue(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ queue = self.sbs.get_queue(self.queue_name)
+
+ # Assert
+ self.assertIsNotNone(queue)
+ self.assertEquals(queue.name, self.queue_name)
+
+ def test_get_queue_with_non_existing_queue(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ resp = self.sbs.get_queue(self.queue_name)
+
+ # Assert
+
+ def test_list_queues(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ queues = self.sbs.list_queues()
+ for queue in queues:
+ name = queue.name
+
+ # Assert
+ self.assertIsNotNone(queues)
+ self.assertNamedItemInContainer(queues, self.queue_name)
+
+ def test_delete_queue_with_existing_queue(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ deleted = self.sbs.delete_queue(self.queue_name)
+
+ # Assert
+ self.assertTrue(deleted)
+ queues = self.sbs.list_queues()
+ self.assertNamedItemNotInContainer(queues, self.queue_name)
+
+ def test_delete_queue_with_existing_queue_fail_not_exist(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ deleted = self.sbs.delete_queue(self.queue_name, True)
+
+ # Assert
+ self.assertTrue(deleted)
+ queues = self.sbs.list_queues()
+ self.assertNamedItemNotInContainer(queues, self.queue_name)
+
+ def test_delete_queue_with_non_existing_queue(self):
+ # Arrange
+
+ # Act
+ deleted = self.sbs.delete_queue(self.queue_name)
+
+ # Assert
+ self.assertFalse(deleted)
+
+ def test_delete_queue_with_non_existing_queue_fail_not_exist(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.delete_queue(self.queue_name, True)
+
+ # Assert
+
+ def test_send_queue_message(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+ sent_msg = Message('send message')
+
+ # Act
+ self.sbs.send_queue_message(self.queue_name, sent_msg)
+
+ # Assert
+
+ def test_receive_queue_message_read_delete_mode(self):
+ # Arrange
+ sent_msg = Message('receive message')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, False)
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_queue_message_read_delete_mode_throws_on_delete(self):
+ # Arrange
+ sent_msg = Message('receive message')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, False)
+ with self.assertRaises(WindowsAzureError):
+ received_msg.delete()
+
+ # Assert
+
+ def test_receive_queue_message_read_delete_mode_throws_on_unlock(self):
+ # Arrange
+ sent_msg = Message('receive message')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, False)
+ with self.assertRaises(WindowsAzureError):
+ received_msg.unlock()
+
+ # Assert
+
+ def test_receive_queue_message_peek_lock_mode(self):
+ # Arrange
+ sent_msg = Message('peek lock message')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, True)
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_queue_message_delete(self):
+ # Arrange
+ sent_msg = Message('peek lock message delete')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, True)
+ received_msg.delete()
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_queue_message_unlock(self):
+ # Arrange
+ sent_msg = Message('peek lock message unlock')
+ self._create_queue_and_send_msg(self.queue_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_queue_message(self.queue_name, True)
+ received_msg.unlock()
+
+ # Assert
+ received_again_msg = self.sbs.receive_queue_message(self.queue_name, True)
+ received_again_msg.delete()
+ self.assertIsNotNone(received_msg)
+ self.assertIsNotNone(received_again_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+ self.assertEquals(received_again_msg.body, received_msg.body)
+
+ def test_send_queue_message_with_custom_message_type(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ sent_msg = Message('peek lock message custom message type', type='text/xml')
+ self.sbs.send_queue_message(self.queue_name, sent_msg)
+ received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
+ received_msg.delete()
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals('text/xml', received_msg.type)
+
+ def test_send_queue_message_with_custom_message_properties(self):
+ # Arrange
+ self._create_queue(self.queue_name)
+
+ # Act
+ sent_msg = Message('message with properties', custom_properties={'hello':'world', 'foo':42})
+ self.sbs.send_queue_message(self.queue_name, sent_msg)
+ received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
+ received_msg.delete()
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(received_msg.custom_properties['hello'], 'world')
+ self.assertEquals(received_msg.custom_properties['foo'], '42') # TODO: note that the integer became a string
+
+ #--Test cases for topics/subscriptions ------------------------------------
+ def test_create_topic_no_options(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_topic(self.topic_name)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_topic_no_options_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_topic(self.topic_name, None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_topic_with_options(self):
+ # Arrange
+
+ # Act
+ topic_options = Topic()
+ topic_options.max_size_in_megabytes = '5120'
+ topic_options.default_message_time_to_live = 'PT1M'
+ created = self.sbs.create_topic(self.topic_name, topic_options)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_topic_with_already_existing_topic(self):
+ # Arrange
+
+ # Act
+ created1 = self.sbs.create_topic(self.topic_name)
+ created2 = self.sbs.create_topic(self.topic_name)
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_topic_with_already_existing_topic_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.sbs.create_topic(self.topic_name)
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.create_topic(self.topic_name, None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_get_topic_with_existing_topic(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ topic = self.sbs.get_topic(self.topic_name)
+
+ # Assert
+ self.assertIsNotNone(topic)
+ self.assertEquals(topic.name, self.topic_name)
+
+ def test_get_topic_with_non_existing_topic(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.get_topic(self.topic_name)
+
+ # Assert
+
+ def test_list_topics(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ topics = self.sbs.list_topics()
+ for topic in topics:
+ name = topic.name
+
+ # Assert
+ self.assertIsNotNone(topics)
+ self.assertNamedItemInContainer(topics, self.topic_name)
+
+ def test_delete_topic_with_existing_topic(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ deleted = self.sbs.delete_topic(self.topic_name)
+
+ # Assert
+ self.assertTrue(deleted)
+ topics = self.sbs.list_topics()
+ self.assertNamedItemNotInContainer(topics, self.topic_name)
+
+ def test_delete_topic_with_existing_topic_fail_not_exist(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ deleted = self.sbs.delete_topic(self.topic_name, True)
+
+ # Assert
+ self.assertTrue(deleted)
+ topics = self.sbs.list_topics()
+ self.assertNamedItemNotInContainer(topics, self.topic_name)
+
+ def test_delete_topic_with_non_existing_topic(self):
+ # Arrange
+
+ # Act
+ deleted = self.sbs.delete_topic(self.topic_name)
+
+ # Assert
+ self.assertFalse(deleted)
+
+ def test_delete_topic_with_non_existing_topic_fail_not_exist(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.delete_topic(self.topic_name, True)
+
+ # Assert
+
+ def test_create_subscription(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ created = self.sbs.create_subscription(self.topic_name, 'MySubscription')
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_subscription_fail_on_exist(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ created = self.sbs.create_subscription(self.topic_name, 'MySubscription', None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_subscription_with_already_existing_subscription(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ created1 = self.sbs.create_subscription(self.topic_name, 'MySubscription')
+ created2 = self.sbs.create_subscription(self.topic_name, 'MySubscription')
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_subscription_with_already_existing_subscription_fail_on_exist(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ created = self.sbs.create_subscription(self.topic_name, 'MySubscription')
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.create_subscription(self.topic_name, 'MySubscription', None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_list_subscriptions(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription2')
+
+ # Act
+ subscriptions = self.sbs.list_subscriptions(self.topic_name)
+
+ # Assert
+ self.assertIsNotNone(subscriptions)
+ self.assertEquals(len(subscriptions), 1)
+ self.assertEquals(subscriptions[0].name, 'MySubscription2')
+
+ def test_get_subscription_with_existing_subscription(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
+
+ # Act
+ subscription = self.sbs.get_subscription(self.topic_name, 'MySubscription3')
+
+ # Assert
+ self.assertIsNotNone(subscription)
+ self.assertEquals(subscription.name, 'MySubscription3')
+
+ def test_get_subscription_with_non_existing_subscription(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.get_subscription(self.topic_name, 'MySubscription4')
+
+ # Assert
+
+ def test_delete_subscription_with_existing_subscription(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+ self._create_subscription(self.topic_name, 'MySubscription4')
+ self._create_subscription(self.topic_name, 'MySubscription5')
+
+ # Act
+ deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription4')
+
+ # Assert
+ self.assertTrue(deleted)
+ subscriptions = self.sbs.list_subscriptions(self.topic_name)
+ self.assertIsNotNone(subscriptions)
+ self.assertEquals(len(subscriptions), 1)
+ self.assertEquals(subscriptions[0].name, 'MySubscription5')
+
+ def test_delete_subscription_with_existing_subscription_fail_not_exist(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+ self._create_subscription(self.topic_name, 'MySubscription4')
+ self._create_subscription(self.topic_name, 'MySubscription5')
+
+ # Act
+ deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription4', True)
+
+ # Assert
+ self.assertTrue(deleted)
+ subscriptions = self.sbs.list_subscriptions(self.topic_name)
+ self.assertIsNotNone(subscriptions)
+ self.assertEquals(len(subscriptions), 1)
+ self.assertEquals(subscriptions[0].name, 'MySubscription5')
+
+ def test_delete_subscription_with_non_existing_subscription(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ deleted = self.sbs.delete_subscription(self.topic_name, 'MySubscription')
+
+ # Assert
+ self.assertFalse(deleted)
+
+ def test_delete_subscription_with_non_existing_subscription_fail_not_exist(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.delete_subscription(self.topic_name, 'MySubscription', True)
+
+ # Assert
+
+ def test_create_rule_no_options(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_no_options_fail_on_exist(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_already_existing_rule(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ created1 = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
+ created2 = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_rule_with_already_existing_rule_fail_on_exist(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1')
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', None, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_options(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.filter_type = 'SqlFilter'
+ rule1.filter_expression = 'foo > 40'
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_list_rules(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule2')
+
+ # Act
+ rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
+
+ # Assert
+ self.assertEquals(len(rules), 2)
+
+ def test_get_rule_with_existing_rule(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule = self.sbs.get_rule(self.topic_name, 'MySubscription', '$Default')
+
+ # Assert
+ self.assertIsNotNone(rule)
+ self.assertEquals(rule.name, '$Default')
+
+ def test_get_rule_with_non_existing_rule(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.get_rule(self.topic_name, 'MySubscription', 'NonExistingRule')
+
+ # Assert
+
+ def test_delete_rule_with_existing_rule(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule3')
+ resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule4')
+
+ # Act
+ deleted1 = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'MyRule4')
+ deleted2 = self.sbs.delete_rule(self.topic_name, 'MySubscription', '$Default')
+
+ # Assert
+ self.assertTrue(deleted1)
+ self.assertTrue(deleted2)
+ rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
+ self.assertIsNotNone(rules)
+ self.assertEquals(len(rules), 1)
+ self.assertEquals(rules[0].name, 'MyRule3')
+
+ def test_delete_rule_with_existing_rule_fail_not_exist(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule3')
+ resp = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule4')
+
+ # Act
+ deleted1 = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'MyRule4', True)
+ deleted2 = self.sbs.delete_rule(self.topic_name, 'MySubscription', '$Default', True)
+
+ # Assert
+ self.assertTrue(deleted1)
+ self.assertTrue(deleted2)
+ rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
+ self.assertIsNotNone(rules)
+ self.assertEquals(len(rules), 1)
+ self.assertEquals(rules[0].name, 'MyRule3')
+
+ def test_delete_rule_with_non_existing_rule(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ deleted = self.sbs.delete_rule(self.topic_name, 'MySubscription', 'NonExistingRule')
+
+ # Assert
+ self.assertFalse(deleted)
+
+ def test_delete_rule_with_non_existing_rule_fail_not_exist(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.sbs.delete_rule(self.topic_name, 'MySubscription', 'NonExistingRule', True)
+
+ # Assert
+
+ def test_send_topic_message(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+
+ # Act
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Assert
+
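+    # The receive tests below exercise both receive modes of
+    # receive_subscription_message: passing False as the third argument uses
+    # read-and-delete mode (delete()/unlock() then raise), while passing True
+    # uses peek-lock mode, where the received message must be explicitly
+    # deleted or unlocked. The optional fourth argument appears to be a
+    # timeout in seconds, as suggested by the calls below.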
+ def test_receive_subscription_message_read_delete_mode(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_subscription_message_read_delete_mode_throws_on_delete(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
+ with self.assertRaises(WindowsAzureError):
+ received_msg.delete()
+
+ # Assert
+
+ def test_receive_subscription_message_read_delete_mode_throws_on_unlock(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', False)
+ with self.assertRaises(WindowsAzureError):
+ received_msg.unlock()
+
+ # Assert
+
+ def test_receive_subscription_message_peek_lock_mode(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True, 5)
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_subscription_message_delete(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True, 5)
+ received_msg.delete()
+
+ # Assert
+ self.assertIsNotNone(received_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+
+ def test_receive_subscription_message_unlock(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_msg = Message('subscription message')
+ self.sbs.send_topic_message(self.topic_name, sent_msg)
+
+ # Act
+ received_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True)
+ received_msg.unlock()
+
+ # Assert
+ received_again_msg = self.sbs.receive_subscription_message(self.topic_name, 'MySubscription', True)
+ received_again_msg.delete()
+ self.assertIsNotNone(received_msg)
+ self.assertIsNotNone(received_again_msg)
+ self.assertEquals(sent_msg.body, received_msg.body)
+ self.assertEquals(received_again_msg.body, received_msg.body)
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/windowsazuretest/util.py b/test/windowsazuretest/util.py
new file mode 100644
index 000000000000..5142a81d4202
--- /dev/null
+++ b/test/windowsazuretest/util.py
@@ -0,0 +1,112 @@
+#------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation.
+#
+# This source code is subject to terms and conditions of the Apache License,
+# Version 2.0. A copy of the license can be found in the License.html file at
+# the root of this distribution. If you cannot locate the Apache License,
+# Version 2.0, please send an email to vspython@microsoft.com. By using this
+# source code in any fashion, you are agreeing to be bound by the terms of the
+# Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#------------------------------------------------------------------------------
+
+import json
+import os
+import time
+from exceptions import EnvironmentError
+
+STATUS_OK = 200
+STATUS_CREATED = 201
+STATUS_ACCEPTED = 202
+STATUS_NO_CONTENT = 204
+STATUS_NOT_FOUND = 404
+STATUS_CONFLICT = 409
+
+DEFAULT_SLEEP_TIME = 60
+DEFAULT_LEASE_TIME = 65
+
+#------------------------------------------------------------------------------
+class Credentials(object):
+ '''
+ Azure credentials needed to run Azure client tests.
+ '''
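+    # The credentials file is JSON; the key names below are the ones read by
+    # the accessor methods in this class (values shown are placeholders):
+    #
+    # {
+    #     "servicebuskey": "<service bus access key>",
+    #     "servicebusns": "<service bus namespace>",
+    #     "storageserviceskey": "<storage account key>",
+    #     "storageservicesname": "<storage account name>",
+    #     "hostserviceid": "<hosted service id>"
+    # }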
+ def __init__(self):
+ credentialsFilename = "windowsazurecredentials.json"
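+        # Look for the credentials file in the current working directory
+        # first, then fall back to the user's home directory.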
+ tmpName = os.path.join(os.getcwd(), credentialsFilename)
+ if not os.path.exists(tmpName):
+            if "USERPROFILE" in os.environ:
+ tmpName = os.path.join(os.environ["USERPROFILE"],
+ credentialsFilename)
+            elif "HOME" in os.environ:
+ tmpName = os.path.join(os.environ["HOME"],
+ credentialsFilename)
+ if not os.path.exists(tmpName):
+ errMsg = "Cannot run Azure tests when the expected config file containing Azure credentials, '%s', does not exist!" % (tmpName)
+ raise EnvironmentError(errMsg)
+
+ with open(tmpName, "r") as f:
+ self.ns = json.load(f)
+
+ def getServiceBusKey(self):
+ return self.ns[u'servicebuskey']
+
+ def getServiceBusNamespace(self):
+ return self.ns[u'servicebusns']
+
+ def getStorageServicesKey(self):
+ return self.ns[u'storageserviceskey']
+
+ def getStorageServicesName(self):
+ return self.ns[u'storageservicesname']
+
+ def getHostServiceID(self):
+ return self.ns[u'hostserviceid']
+
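+# Instantiated at import time, so importing this module already requires the
+# credentials file described above to be present.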
+credentials = Credentials()
+
+def getUniqueTestRunID():
+ '''
+ Returns a unique identifier for this particular test run so
+ parallel test runs using the same Azure keys do not interfere
+ with one another.
+
+ TODO:
+ - not really unique now; just machine specific
+ '''
+ from os import environ
+    if "COMPUTERNAME" in environ:
+ ret_val = environ["COMPUTERNAME"]
+ else:
+ import socket
+ ret_val = socket.gethostname()
+ for bad in ["-", "_", " ", "."]:
+ ret_val = ret_val.replace(bad, "")
+ ret_val = ret_val.lower().strip()
+ return ret_val
+
+def getUniqueNameBasedOnCurrentTime(base_name):
+ '''
+    Returns a name that should be unique to this test run by appending
+    the current time to base_name, so that parallel test runs using the
+    same Azure keys do not interfere with one another.
+ '''
+ cur_time = str(time.clock())
+ for bad in ["-", "_", " ", "."]:
+ cur_time = cur_time.replace(bad, "")
+ cur_time = cur_time.lower().strip()
+ return base_name + cur_time