diff --git a/ChangeLog.txt b/ChangeLog.txt
index 24d89d54d79f..bbee3e0ff46c 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,3 +1,19 @@
+2013-07-08 Version 0.7.0
+ * Added service bus management API
+ * Added support for list blobs delimiter (for easier hierarchical listings)
+ * Fixes for bugs:
+    #90 get_blob_metadata returns more than the metadata (also get_container_metadata and get_queue_metadata)
+    #87 Proxy support for *NIX systems
+    #86 Fix capitalization of the 'Fingerprint' tag in the XML serialization of the SSH keys configuration
+    #83 Fixed an issue that prevented the creation of endpoints for a VM
+    #80 Error deserializing datetime value from Table Store
+    #79 Specify VirtualNetworkName when creating Virtual Machine
+ * Cleanup of imports
+ * Renamed some private functions that weren't starting with an underscore
+ * Removed code generator (it's now obsolete; we make changes directly in the Python sources)
+
+ Thank you to timanovsky, sebhomengo, pneumee, ogrisel, 0xc0decafe and apatard for their bug reports and fixes.
+
2013-03-20 Version 0.6.2
* Fixes for bugs:
#75 crash on python 2.7 x64 windows
@@ -14,5 +30,6 @@
* Added service management API
* Added ability to specify custom hosts
* Added proxy server support (HTTP CONNECT tunneling)
+
2012-06-06 Version 0.5.0
* Initial Release
diff --git a/src/azure.pyproj b/src/azure.pyproj
index fc124380af94..80450234e0c5 100644
--- a/src/azure.pyproj
+++ b/src/azure.pyproj
@@ -37,6 +37,8 @@
+
+
diff --git a/src/azure.sln b/src/azure.sln
index dda1bcbdb4f6..a9e075ed5e50 100644
--- a/src/azure.sln
+++ b/src/azure.sln
@@ -1,11 +1,22 @@
-Microsoft Visual Studio Solution File, Format Version 11.00
-# Visual Studio 2010
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 2012
Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "azure", "azure.pyproj", "{25B2C65A-0553-4452-8907-8B5B17544E68}"
EndProject
Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "azuretest", "..\test\azuretest.pyproj", "{C0742A2D-4862-40E4-8A28-036EECDBC614}"
EndProject
Global
+ GlobalSection(TeamFoundationVersionControl) = preSolution
+ SccNumberOfProjects = 3
+ SccEnterpriseProvider = {4CA58AB2-18FA-4F8D-95D4-32DDF27D184C}
+ SccTeamFoundationServer = http://tcvstf:8080/tfs/tc
+ SccLocalPath0 = .
+ SccProjectUniqueName1 = azure.pyproj
+ SccLocalPath1 = .
+ SccProjectUniqueName2 = ..\\test\\azuretest.pyproj
+ SccProjectName2 = ../test
+ SccLocalPath2 = ..\\test
+ EndGlobalSection
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
diff --git a/src/azure/__init__.py b/src/azure/__init__.py
index 94a5ab1765eb..b8909866f8d1 100644
--- a/src/azure/__init__.py
+++ b/src/azure/__init__.py
@@ -12,18 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
+import ast
+import base64
import sys
import types
+import urllib2
+
from datetime import datetime
from xml.dom import minidom
-import base64
-import urllib2
-import ast
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
+__author__ = 'Microsoft Corp. '
+__version__ = '0.7.0'
+
#Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
@@ -60,7 +64,7 @@
_ERROR_VALUE_SHOULD_NOT_BE_NULL = '%s should not be None.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = 'Cannot serialize the specified value (%s) to an entity. Please use an EntityProperty (which can specify custom types), int, str, bool, or datetime'
-_USER_AGENT_STRING = 'pyazure'
+_USER_AGENT_STRING = 'pyazure/' + __version__
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
@@ -661,7 +665,7 @@ def _parse_response_for_dict(response):
return return_dict
-def _parse_response_for_dict_prefix(response, prefix):
+def _parse_response_for_dict_prefix(response, prefixes):
''' Extracts name-values for names starting with prefix from response header. Filter out the standard http headers.'''
if response is None:
@@ -670,7 +674,7 @@ def _parse_response_for_dict_prefix(response, prefix):
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.iteritems():
- for prefix_value in prefix:
+ for prefix_value in prefixes:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
diff --git a/src/azure/http/httpclient.py b/src/azure/http/httpclient.py
index 3cc85c7e693e..8ff6acf173bb 100644
--- a/src/azure/http/httpclient.py
+++ b/src/azure/http/httpclient.py
@@ -54,6 +54,10 @@ def __init__(self, service_instance, cert_file=None, account_name=None, account_
self.protocol = protocol
self.proxy_host = None
self.proxy_port = None
+ if protocol == 'http':
+ self.port = httplib.HTTP_PORT
+ else:
+ self.port = httplib.HTTPS_PORT
def set_proxy(self, host, port):
'''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
@@ -69,17 +73,36 @@ def get_connection(self, request):
if sys.platform.lower().startswith('win'):
import azure.http.winhttp
_connection = azure.http.winhttp._HTTPConnection(request.host, cert_file=self.cert_file, protocol=self.protocol)
- elif self.protocol == 'http':
- _connection = httplib.HTTPConnection(request.host)
+ proxy_host = self.proxy_host
+ proxy_port = self.proxy_port
else:
- _connection = httplib.HTTPSConnection(request.host, cert_file=self.cert_file)
+ if self.proxy_host:
+ proxy_host = request.host
+ proxy_port = self.port
+ host = self.proxy_host
+ port = self.proxy_port
+ else:
+ host = request.host
+ port = self.port
+
+ if self.protocol == 'http':
+ _connection = httplib.HTTPConnection(host, int(port))
+ else:
+ _connection = httplib.HTTPSConnection(host, int(port), cert_file=self.cert_file)
if self.proxy_host:
- _connection.set_tunnel(self.proxy_host, self.proxy_port)
+ _connection.set_tunnel(proxy_host, int(proxy_port))
return _connection
def send_request_headers(self, connection, request_headers):
+ if not sys.platform.lower().startswith('win'):
+ if self.proxy_host:
+ for i in connection._buffer:
+ if i.startswith("Host: "):
+ connection._buffer.remove(i)
+ connection.putheader('Host', "%s:%s" % (connection._tunnel_host, connection._tunnel_port))
+
for name, value in request_headers:
if value:
connection.putheader(name, value)
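
Usage sketch (illustrative, placeholder values) for the proxy support added above; on non-Windows platforms the client now connects to the proxy and tunnels the request to the real host via HTTP CONNECT:

    from azure.servicebus.servicebusservice import ServiceBusService

    bus_service = ServiceBusService(service_namespace='mynamespace',
                                    account_key='<account-key>',
                                    issuer='owner')
    # All subsequent requests go to the proxy, which tunnels them to the
    # service bus endpoint.
    bus_service.set_proxy('192.168.1.100', 8888)
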
diff --git a/src/azure/servicebus/__init__.py b/src/azure/servicebus/__init__.py
index d10e26dba380..5b2dd23704a2 100644
--- a/src/azure/servicebus/__init__.py
+++ b/src/azure/servicebus/__init__.py
@@ -12,23 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
+import ast
+import httplib
import sys
import time
import urllib2
-from xml.dom import minidom
-import ast
-import httplib
-from datetime import datetime
-
+from datetime import datetime
+from xml.dom import minidom
+from azure import (WindowsAzureData,
+ WindowsAzureError,
+ xml_escape,
+ _create_entry,
+ _general_error_handler,
+ _get_entry_properties,
+ _get_child_nodes,
+ _get_children_from_path,
+ _get_first_child_node_value,
+ _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE,
+ _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK,
+ _ERROR_QUEUE_NOT_FOUND,
+ _ERROR_TOPIC_NOT_FOUND,
+ _USER_AGENT_STRING,
+ )
from azure.http import HTTPError
-from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
- _create_entry, _get_entry_properties, xml_escape,
- _get_child_nodes, WindowsAzureMissingResourceError,
- WindowsAzureConflictError, _get_serialization_name,
- _get_children_from_path, _get_first_child_node_value,
- _USER_AGENT_STRING)
-import azure
#default rule name for subscription
DEFAULT_RULE_NAME='$Default'
@@ -173,7 +180,7 @@ def delete(self):
elif self._topic_name and self._subscription_name:
self.service_bus_service.delete_subscription_message(self._topic_name, self._subscription_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
else:
- raise WindowsAzureError(azure._ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)
+ raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)
def unlock(self):
''' Unlocks itself if find queue name or topic name and subscription name. '''
@@ -182,7 +189,7 @@ def unlock(self):
elif self._topic_name and self._subscription_name:
self.service_bus_service.unlock_subscription_message(self._topic_name, self._subscription_name, self.broker_properties['SequenceNumber'], self.broker_properties['LockToken'])
else:
- raise WindowsAzureError(azure._ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)
+ raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)
def add_headers(self, request):
''' add addtional headers to request for message request.'''
@@ -452,7 +459,7 @@ def _convert_xml_to_queue(xmlstr):
invalid_queue = False
if invalid_queue:
- raise WindowsAzureError(azure._ERROR_QUEUE_NOT_FOUND)
+ raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND)
#extract id, updated and name value from feed entry and set them of queue.
for name, value in _get_entry_properties(xmlstr, True).iteritems():
@@ -513,7 +520,7 @@ def _convert_xml_to_topic(xmlstr):
invalid_topic = False
if invalid_topic:
- raise WindowsAzureError(azure._ERROR_TOPIC_NOT_FOUND)
+ raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND)
#extract id, updated and name value from feed entry and set them of topic.
for name, value in _get_entry_properties(xmlstr, True).iteritems():
@@ -572,7 +579,7 @@ def _convert_xml_to_subscription(xmlstr):
return subscription
-def convert_subscription_to_xml(subscription):
+def _convert_subscription_to_xml(subscription):
'''
Converts a subscription object to xml to send. The order of each field of subscription
in xml is very important so we cann't simple call convert_class_to_xml.
@@ -602,7 +609,7 @@ def convert_subscription_to_xml(subscription):
subscription_body += ''
return _create_entry(subscription_body)
-def convert_rule_to_xml(rule):
+def _convert_rule_to_xml(rule):
'''
Converts a rule object to xml to send. The order of each field of rule
in xml is very important so we cann't simple call convert_class_to_xml.
@@ -629,7 +636,7 @@ def convert_rule_to_xml(rule):
return _create_entry(rule_body)
-def convert_topic_to_xml(topic):
+def _convert_topic_to_xml(topic):
'''
Converts a topic object to xml to send. The order of each field of topic
in xml is very important so we cann't simple call convert_class_to_xml.
@@ -655,7 +662,7 @@ def convert_topic_to_xml(topic):
return _create_entry(topic_body)
-def convert_queue_to_xml(queue):
+def _convert_queue_to_xml(queue):
'''
Converts a queue object to xml to send. The order of each field of queue
in xml is very important so we cann't simple call convert_class_to_xml.
diff --git a/src/azure/servicebus/servicebusservice.py b/src/azure/servicebus/servicebusservice.py
index 79353f00156e..ff12712b15e4 100644
--- a/src/azure/servicebus/servicebusservice.py
+++ b/src/azure/servicebus/servicebusservice.py
@@ -12,48 +12,61 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import base64
import os
-import urllib2
+from azure import (WindowsAzureError,
+ SERVICE_BUS_HOST_BASE,
+ _convert_response_to_feeds,
+ _dont_fail_not_exist,
+ _dont_fail_on_exist,
+ _get_request_body,
+ _int_or_none,
+ _str,
+ _update_request_uri_query,
+ _validate_not_none,
+ )
+from azure.http import (HTTPError,
+ HTTPRequest,
+ )
from azure.http.httpclient import _HTTPClient
-from azure.http import HTTPError, HTTP_RESPONSE_NO_CONTENT
-from azure.servicebus import (_update_service_bus_header, _create_message,
- convert_topic_to_xml, _convert_response_to_topic,
- convert_queue_to_xml, _convert_response_to_queue,
- convert_subscription_to_xml, _convert_response_to_subscription,
- convert_rule_to_xml, _convert_response_to_rule,
- _convert_xml_to_queue, _convert_xml_to_topic,
- _convert_xml_to_subscription, _convert_xml_to_rule,
- _service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE,
- AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)
-from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
-from azure import (_validate_not_none, Feed,
- _convert_response_to_feeds, _str, _str_or_none, _int_or_none,
- _get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
- WindowsAzureError, _parse_response, _convert_class_to_xml,
- _parse_response_for_dict, _parse_response_for_dict_prefix,
- _parse_response_for_dict_filter,
- _parse_enum_results_list, _update_request_uri_query_local_storage,
- _parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
+from azure.servicebus import (AZURE_SERVICEBUS_NAMESPACE,
+ AZURE_SERVICEBUS_ACCESS_KEY,
+ AZURE_SERVICEBUS_ISSUER,
+ _convert_topic_to_xml,
+ _convert_response_to_topic,
+ _convert_queue_to_xml,
+ _convert_response_to_queue,
+ _convert_subscription_to_xml,
+ _convert_response_to_subscription,
+ _convert_rule_to_xml,
+ _convert_response_to_rule,
+ _convert_xml_to_queue,
+ _convert_xml_to_topic,
+ _convert_xml_to_subscription,
+ _convert_xml_to_rule,
+ _create_message,
+ _service_bus_error_handler,
+ _update_service_bus_header,
+ )
class ServiceBusService:
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
- Creates a new queue. Once created, this queue's resource manifest is immutable.
+ Creates a new queue. Once created, this queue's resource manifest is
+ immutable.
- queue: queue object to create.
- queue_name: the name of the queue.
- fail_on_exist: specify whether to throw an exception when the queue exists.
+ queue_name: Name of the queue to create.
+ queue: Queue object to create.
+ fail_on_exist:
+ Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
- request.body = _get_request_body(convert_queue_to_xml(queue))
+ request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
@@ -69,10 +82,12 @@ def create_queue(self, queue_name, queue=None, fail_on_exist=False):
def delete_queue(self, queue_name, fail_not_exist=False):
'''
- Deletes an existing queue. This operation will also remove all associated state
- including messages in the queue.
+ Deletes an existing queue. This operation will also remove all
+ associated state including messages in the queue.
- fail_not_exist: specify whether to throw an exception if the queue doesn't exist.
+ queue_name: Name of the queue to delete.
+ fail_not_exist:
+ Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -96,7 +111,7 @@ def get_queue(self, queue_name):
'''
Retrieves an existing queue.
- queue_name: name of the queue.
+ queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -125,18 +140,20 @@ def list_queues(self):
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
- Creates a new topic. Once created, this topic resource manifest is immutable.
+ Creates a new topic. Once created, this topic resource manifest is
+ immutable.
- topic_name: name of the topic.
- topic: the Topic object to create.
- fail_on_exist: specify whether to throw an exception when the topic exists.
+ topic_name: Name of the topic to create.
+ topic: Topic object to create.
+ fail_on_exist:
+ Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
- request.body = _get_request_body(convert_topic_to_xml(topic))
+ request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
@@ -152,11 +169,12 @@ def create_topic(self, topic_name, topic=None, fail_on_exist=False):
def delete_topic(self, topic_name, fail_not_exist=False):
'''
- Deletes an existing topic. This operation will also remove all associated state
- including associated subscriptions.
+ Deletes an existing topic. This operation will also remove all
+ associated state including associated subscriptions.
- topic_name: name of the topic.
- fail_not_exist: specify whether throw exception when topic doesn't exist.
+ topic_name: Name of the topic to delete.
+ fail_not_exist:
+ Specify whether to throw an exception when the topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
@@ -180,7 +198,7 @@ def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
- topic_name: name of the topic.
+ topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
@@ -209,12 +227,14 @@ def list_topics(self):
def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_on_exist=False):
'''
- Creates a new rule. Once created, this rule's resource manifest is immutable.
+ Creates a new rule. Once created, this rule's resource manifest is
+ immutable.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- rule_name: name of the rule.
- fail_on_exist: specify whether to throw an exception when the rule exists.
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ rule_name: Name of the rule.
+ fail_on_exist:
+ Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -223,7 +243,7 @@ def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + '/rules/' + _str(rule_name) + ''
- request.body = _get_request_body(convert_rule_to_xml(rule))
+ request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
@@ -241,11 +261,13 @@ def delete_rule(self, topic_name, subscription_name, rule_name, fail_not_exist=F
'''
Deletes an existing rule.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- rule_name: the name of the rule. DEFAULT_RULE_NAME=$Default. Use DEFAULT_RULE_NAME
- to delete default rule for the subscription.
- fail_not_exist: specify whether throw exception when rule doesn't exist.
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ rule_name:
+ Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
+ Use DEFAULT_RULE_NAME to delete default rule for the subscription.
+ fail_not_exist:
+ Specify whether to throw an exception when the rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -271,9 +293,9 @@ def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- rule_name: name of the rule
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ rule_name: Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -292,8 +314,8 @@ def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -309,12 +331,13 @@ def list_rules(self, topic_name, subscription_name):
def create_subscription(self, topic_name, subscription_name, subscription=None, fail_on_exist=False):
'''
- Creates a new subscription. Once created, this subscription resource manifest is
- immutable.
+ Creates a new subscription. Once created, this subscription resource
+ manifest is immutable.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- fail_on_exist: specify whether throw exception when subscription exists.
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ fail_on_exist:
+ Specify whether to throw an exception when the subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -322,7 +345,7 @@ def create_subscription(self, topic_name, subscription_name, subscription=None,
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
- request.body = _get_request_body(convert_subscription_to_xml(subscription))
+ request.body = _get_request_body(_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
if not fail_on_exist:
@@ -340,9 +363,11 @@ def delete_subscription(self, topic_name, subscription_name, fail_not_exist=Fals
'''
Deletes an existing subscription.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- fail_not_exist: specify whether to throw an exception when the subscription doesn't exist.
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription to delete.
+ fail_not_exist:
+ Specify whether to throw an exception when the subscription
+ doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -367,8 +392,8 @@ def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -386,7 +411,7 @@ def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
- topic_name: the name of the topic
+ topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
@@ -401,13 +426,14 @@ def list_subscriptions(self, topic_name):
def send_topic_message(self, topic_name, message=None):
'''
- Enqueues a message into the specified topic. The limit to the number of messages
- which may be present in the topic is governed by the message size in MaxTopicSizeInBytes.
- If this message causes the topic to exceed its quota, a quota exceeded error is
- returned and the message will be rejected.
+ Enqueues a message into the specified topic. The limit to the number
+ of messages which may be present in the topic is governed by the
+ message size in MaxTopicSizeInBytes. If this message causes the topic
+ to exceed its quota, a quota exceeded error is returned and the
+ message will be rejected.
- topic_name: name of the topic.
- message: the Message object containing message body and properties.
+ topic_name: Name of the topic.
+ message: Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
@@ -422,19 +448,22 @@ def send_topic_message(self, topic_name, message=None):
def peek_lock_subscription_message(self, topic_name, subscription_name, timeout='60'):
'''
- This operation is used to atomically retrieve and lock a message for processing.
- The message is guaranteed not to be delivered to other receivers during the lock
- duration period specified in buffer description. Once the lock expires, the
- message will be available to other receivers (on the same subscription only)
- during the lock duration period specified in the topic description. Once the lock
- expires, the message will be available to other receivers. In order to complete
- processing of the message, the receiver should issue a delete command with the
- lock ID received from this operation. To abandon processing of the message and
- unlock it for other receivers, an Unlock Message command should be issued, or
- the lock duration period can expire.
+ This operation is used to atomically retrieve and lock a message for
+ processing. The message is guaranteed not to be delivered to other
+ receivers (on the same subscription only) during the lock duration
+ period specified in the topic description. Once the lock expires, the
+ message will be available to other receivers. In order to complete
+ processing of the message, the receiver should issue a delete command
+ with the lock ID received from this operation. To abandon processing
+ of the message and unlock it for other receivers, an Unlock Message
+ command should be issued, or the lock duration period can expire.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -451,17 +480,19 @@ def peek_lock_subscription_message(self, topic_name, subscription_name, timeout=
def unlock_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
'''
- Unlock a message for processing by other receivers on a given subscription.
- This operation deletes the lock object, causing the message to be unlocked.
- A message must have first been locked by a receiver before this operation
- is called.
+ Unlock a message for processing by other receivers on a given
+ subscription. This operation deletes the lock object, causing the
+ message to be unlocked. A message must have first been locked by a
+ receiver before this operation is called.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- sequence_name: The sequence number of the message to be unlocked as returned
- in BrokerProperties['SequenceNumber'] by the Peek Message operation.
- lock_token: The ID of the lock as returned by the Peek Message operation in
- BrokerProperties['LockToken']
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ sequence_number:
+ The sequence number of the message to be unlocked as returned in
+ BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token:
+ The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -477,13 +508,14 @@ def unlock_subscription_message(self, topic_name, subscription_name, sequence_nu
def read_delete_subscription_message(self, topic_name, subscription_name, timeout='60'):
'''
- Read and delete a message from a subscription as an atomic operation. This
- operation should be used when a best-effort guarantee is sufficient for an
- application; that is, using this operation it is possible for messages to
- be lost if processing fails.
+ Read and delete a message from a subscription as an atomic operation.
+ This operation should be used when a best-effort guarantee is
+ sufficient for an application; that is, using this operation it is
+ possible for messages to be lost if processing fails.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -500,16 +532,19 @@ def read_delete_subscription_message(self, topic_name, subscription_name, timeou
def delete_subscription_message(self, topic_name, subscription_name, sequence_number, lock_token):
'''
- Completes processing on a locked message and delete it from the subscription.
- This operation should only be called after processing a previously locked
- message is successful to maintain At-Least-Once delivery assurances.
+ Completes processing on a locked message and deletes it from the
+ subscription. This operation should only be called after processing a
+ previously locked message is successful to maintain At-Least-Once
+ delivery assurances.
- topic_name: the name of the topic
- subscription_name: the name of the subscription
- sequence_name: The sequence number of the message to be deleted as returned
- in BrokerProperties['SequenceNumber'] by the Peek Message operation.
- lock_token: The ID of the lock as returned by the Peek Message operation in
- BrokerProperties['LockToken']
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ sequence_number:
+ The sequence number of the message to be deleted as returned in
+ BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token:
+ The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
@@ -525,13 +560,14 @@ def delete_subscription_message(self, topic_name, subscription_name, sequence_nu
def send_queue_message(self, queue_name, message=None):
'''
- Sends a message into the specified queue. The limit to the number of messages
- which may be present in the topic is governed by the message size the
- MaxTopicSizeInMegaBytes. If this message will cause the queue to exceed its
- quota, a quota exceeded error is returned and the message will be rejected.
+ Sends a message into the specified queue. The limit to the number of
+ messages which may be present in the queue is governed by the message
+ size and the MaxTopicSizeInMegaBytes setting. If this message causes the queue
+ to exceed its quota, a quota exceeded error is returned and the
+ message will be rejected.
- queue_name: name of the queue
- message: the Message object containing message body and properties.
+ queue_name: Name of the queue.
+ message: Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -546,17 +582,18 @@ def send_queue_message(self, queue_name, message=None):
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
- Automically retrieves and locks a message from a queue for processing. The
- message is guaranteed not to be delivered to other receivers (on the same
- subscription only) during the lock duration period specified in the queue
- description. Once the lock expires, the message will be available to other
- receivers. In order to complete processing of the message, the receiver
- should issue a delete command with the lock ID received from this operation.
- To abandon processing of the message and unlock it for other receivers,
- an Unlock Message command should be issued, or the lock duration period
- can expire.
+ Atomically retrieves and locks a message from a queue for processing.
+ The message is guaranteed not to be delivered to other receivers (on
+ the same subscription only) during the lock duration period specified
+ in the queue description. Once the lock expires, the message will be
+ available to other receivers. In order to complete processing of the
+ message, the receiver should issue a delete command with the lock ID
+ received from this operation. To abandon processing of the message and
+ unlock it for other receivers, an Unlock Message command should be
+ issued, or the lock duration period can expire.
- queue_name: name of the queue
+ queue_name: Name of the queue.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -572,16 +609,18 @@ def peek_lock_queue_message(self, queue_name, timeout='60'):
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
- Unlocks a message for processing by other receivers on a given subscription.
- This operation deletes the lock object, causing the message to be unlocked.
- A message must have first been locked by a receiver before this operation is
- called.
+ Unlocks a message for processing by other receivers on a given
+ queue. This operation deletes the lock object, causing the
+ message to be unlocked. A message must have first been locked by a
+ receiver before this operation is called.
- queue_name: name of the queue
- sequence_name: The sequence number of the message to be unlocked as returned
- in BrokerProperties['SequenceNumber'] by the Peek Message operation.
- lock_token: The ID of the lock as returned by the Peek Message operation in
- BrokerProperties['LockToken']
+ queue_name: Name of the queue.
+ sequence_number:
+ The sequence number of the message to be unlocked as returned in
+ BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token:
+ The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
@@ -596,12 +635,13 @@ def unlock_queue_message(self, queue_name, sequence_number, lock_token):
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
- Reads and deletes a message from a queue as an atomic operation. This operation
- should be used when a best-effort guarantee is sufficient for an application;
- that is, using this operation it is possible for messages to be lost if
- processing fails.
+ Reads and deletes a message from a queue as an atomic operation. This
+ operation should be used when a best-effort guarantee is sufficient
+ for an application; that is, using this operation it is possible for
+ messages to be lost if processing fails.
- queue_name: name of the queue
+ queue_name: Name of the queue.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -617,15 +657,18 @@ def read_delete_queue_message(self, queue_name, timeout='60'):
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
- Completes processing on a locked message and delete it from the queue. This
- operation should only be called after processing a previously locked message
- is successful to maintain At-Least-Once delivery assurances.
+ Completes processing on a locked message and deletes it from the queue.
+ This operation should only be called after processing a previously
+ locked message is successful to maintain At-Least-Once delivery
+ assurances.
- queue_name: name of the queue
- sequence_name: The sequence number of the message to be deleted as returned
- in BrokerProperties['SequenceNumber'] by the Peek Message operation.
- lock_token: The ID of the lock as returned by the Peek Message operation in
- BrokerProperties['LockToken']
+ queue_name: Name of the queue.
+ sequence_number:
+ The sequence number of the message to be deleted as returned in
+ BrokerProperties['SequenceNumber'] by the Peek Message operation.
+ lock_token:
+ The ID of the lock as returned by the Peek Message operation in
+ BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
@@ -638,19 +681,37 @@ def delete_queue_message(self, queue_name, sequence_number, lock_token):
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
response = self._perform_request(request)
-
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
+ '''
+ Receive a message from a queue for processing.
+
+ queue_name: Name of the queue.
+ peek_lock:
+ Optional. True to retrieve and lock the message. False to read and
+ delete the message. Default is True (lock).
+ timeout: Optional. The timeout parameter is expressed in seconds.
+ '''
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
-
+
def receive_subscription_message(self, topic_name, subscription_name, peek_lock=True, timeout=60):
+ '''
+ Receive a message from a subscription for processing.
+
+ topic_name: Name of the topic.
+ subscription_name: Name of the subscription.
+ peek_lock:
+ Optional. True to retrieve and lock the message. False to read and
+ delete the message. Default is True (lock).
+ timeout: Optional. The timeout parameter is expressed in seconds.
+ '''
if peek_lock:
return self.peek_lock_subscription_message(topic_name, subscription_name, timeout)
else:
return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
-
+
def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE):
#x_ms_version is not used, but the parameter is kept for backwards compatibility
self.requestid = None
@@ -678,12 +739,14 @@ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_v
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
- '''Returns a new service which will process requests with the
- specified filter. Filtering operations can include logging, automatic
- retrying, etc... The filter is a lambda which receives the HTTPRequest
- and another lambda. The filter can perform any pre-processing on the
- request, pass it off to the next lambda, and then perform any post-processing
- on the response.'''
+ '''
+ Returns a new service which will process requests with the specified
+ filter. Filtering operations can include logging, automatic retrying,
+ etc... The filter is a lambda which receives the HTTPRequest and
+ another lambda. The filter can perform any pre-processing on the
+ request, pass it off to the next lambda, and then perform any
+ post-processing on the response.
+ '''
res = ServiceBusService(self.service_namespace, self.account_key,
self.issuer)
old_filter = self._filter
@@ -694,7 +757,9 @@ def new_filter(request):
return res
def set_proxy(self, host, port):
- '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ '''
+ Sets the proxy server host and port for HTTP CONNECT tunnelling.
+ '''
self._httpclient.set_proxy(host, port)
def _get_host(self):
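
Usage sketch (placeholder names) for the receive helpers documented above, covering both the peek-lock and the read-and-delete modes:

    from azure.servicebus.servicebusservice import ServiceBusService

    bus_service = ServiceBusService('mynamespace', '<account-key>', 'owner')

    # peek_lock=True: retrieve and lock the message, then complete or abandon it.
    msg = bus_service.receive_queue_message('taskqueue', peek_lock=True, timeout=60)
    if msg.body is not None:
        # ... process the message body ...
        msg.delete()    # completes processing and removes the message
        # msg.unlock() would instead abandon processing and release the lock

    # peek_lock=False: read and delete the message in one atomic operation.
    msg = bus_service.receive_queue_message('taskqueue', peek_lock=False)
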
diff --git a/src/azure/servicemanagement/__init__.py b/src/azure/servicemanagement/__init__.py
index 23c31aae4f41..98e8951ca863 100644
--- a/src/azure/servicemanagement/__init__.py
+++ b/src/azure/servicemanagement/__init__.py
@@ -13,10 +13,19 @@
# limitations under the License.
#--------------------------------------------------------------------------
import base64
-from azure.http import HTTPError
-from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
- _str, _list_of, _scalar_list_of, _dict_of, _Base64String)
-import azure
+
+from xml.dom import minidom
+from azure import (WindowsAzureData,
+ _Base64String,
+ _create_entry,
+ _dict_of,
+ _general_error_handler,
+ _get_children_from_path,
+ _get_first_child_node_value,
+ _list_of,
+ _scalar_list_of,
+ _str,
+ )
#-----------------------------------------------------------------------------
# Constants for Azure app environment settings.
@@ -569,9 +578,9 @@ def __getitem__(self, index):
return self.public_keys[index]
class PublicKey(WindowsAzureData):
- def __init__(self):
- self.finger_print = ''
- self.path = ''
+ def __init__(self, fingerprint=u'', path=u''):
+ self.fingerprint = fingerprint
+ self.path = path
class KeyPairs(WindowsAzureData):
def __init__(self):
@@ -587,9 +596,9 @@ def __getitem__(self, index):
return self.key_pairs[index]
class KeyPair(WindowsAzureData):
- def __init__(self):
- self.finger_print = u''
- self.path = u''
+ def __init__(self, fingerprint=u'', path=u''):
+ self.fingerprint = fingerprint
+ self.path = path
class LoadBalancerProbe(WindowsAzureData):
def __init__(self):
@@ -632,6 +641,24 @@ class AsynchronousOperationResult(WindowsAzureData):
def __init__(self, request_id=None):
self.request_id = request_id
+class ServiceBusRegion(WindowsAzureData):
+ def __init__(self):
+ self.code = u''
+ self.fullname = u''
+
+class ServiceBusNamespace(WindowsAzureData):
+ def __init__(self):
+ self.name = u''
+ self.region = u''
+ self.default_key = u''
+ self.status = u''
+ self.created_at = u''
+ self.acs_management_endpoint = u''
+ self.servicebus_endpoint = u''
+ self.connection_string = u''
+ self.subscription_id = u''
+ self.enabled = False
+
def _update_management_header(request):
''' Add additional headers for management. '''
@@ -880,14 +907,14 @@ def linux_configuration_to_xml(configuration):
xml += ''
for key in configuration.ssh.public_keys:
xml += ''
- xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ xml += _XmlSerializer.data_to_xml([('Fingerprint', key.fingerprint),
('Path', key.path)])
xml += ''
xml += ''
xml += ''
for key in configuration.ssh.key_pairs:
xml += ''
- xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ xml += _XmlSerializer.data_to_xml([('Fingerprint', key.fingerprint),
('Path', key.path)])
xml += ''
xml += ''
@@ -900,8 +927,7 @@ def network_configuration_to_xml(configuration):
xml += ''
for endpoint in configuration.input_endpoints:
xml += ''
- xml += _XmlSerializer.data_to_xml([('EnableDirectServerReturn', endpoint.enable_direct_server_return, _lower),
- ('LoadBalancedEndpointSetName', endpoint.load_balanced_endpoint_set_name),
+ xml += _XmlSerializer.data_to_xml([('LoadBalancedEndpointSetName', endpoint.load_balanced_endpoint_set_name),
('LocalPort', endpoint.local_port),
('Name', endpoint.name),
('Port', endpoint.port)])
@@ -913,7 +939,9 @@ def network_configuration_to_xml(configuration):
('Protocol', endpoint.load_balancer_probe.protocol)])
xml += ''
- xml += _XmlSerializer.data_to_xml([('Protocol', endpoint.protocol)])
+ xml += _XmlSerializer.data_to_xml([('Protocol', endpoint.protocol),
+ ('EnableDirectServerReturn', endpoint.enable_direct_server_return, _lower)])
+
xml += ''
xml += ''
xml += ''
@@ -971,7 +999,7 @@ def role_to_xml(availability_set_name, data_virtual_hard_disks, network_configur
if role_size is not None:
xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])
-
+
return xml
@staticmethod
@@ -1000,7 +1028,7 @@ def capture_role_to_xml(post_capture_action, target_image_name, target_image_lab
return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
@staticmethod
- def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size, virtual_network_name):
xml = _XmlSerializer.data_to_xml([('Name', deployment_name),
('DeploymentSlot', deployment_slot),
('Label', label, base64.b64encode)])
@@ -1009,6 +1037,10 @@ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, r
xml += _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set)
xml += ''
xml += ''
+
+ if virtual_network_name is not None:
+ xml += _XmlSerializer.data_to_xml([('VirtualNetworkName', virtual_network_name)])
+
return _XmlSerializer.doc_from_xml('Deployment', xml)
@staticmethod
@@ -1060,4 +1092,139 @@ def extended_properties_dict_to_xml_fragment(extended_properties):
xml += ''
return xml
+def _parse_bool(value):
+ if value.lower() == 'true':
+ return True
+ return False
+
+class _ServiceBusManagementXmlSerializer(object):
+ @staticmethod
+ def namespace_to_xml(name, region):
+ '''Converts a service bus namespace description to xml
+
+ The xml format:
+ <entry xmlns='http://www.w3.org/2005/Atom'>
+   <content type='application/xml'>
+     <NamespaceDescription
+         xmlns='http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'>
+       <Region>West US</Region>
+     </NamespaceDescription>
+   </content>
+ </entry>
+ '''
+ body = '<NamespaceDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
+ body += ''.join(['<Region>', region, '</Region>'])
+ body += '</NamespaceDescription>'
+
+ return _create_entry(body)
+
+ @staticmethod
+ def xml_to_namespace(xmlstr):
+ '''Converts xml response to service bus namespace
+
+ The xml format for namespace:
+ <entry>
+   <id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id>
+   <title type="text">myunittests</title>
+   <updated>2012-08-22T16:48:10Z</updated>
+   <content type="application/xml">
+     <NamespaceDescription>
+       <Name>myunittests</Name>
+       <Region>West US</Region>
+       <DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey>
+       <Status>Active</Status>
+       <CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt>
+       <AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint>
+       <ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint>
+       <ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString>
+       <SubscriptionId>00000000000000000000000000000000</SubscriptionId>
+       <Enabled>true</Enabled>
+     </NamespaceDescription>
+   </content>
+ </entry>
+ '''
+ xmldoc = minidom.parseString(xmlstr)
+ namespace = ServiceBusNamespace()
+
+ mappings = (
+ ('Name', 'name', None),
+ ('Region', 'region', None),
+ ('DefaultKey', 'default_key', None),
+ ('Status', 'status', None),
+ ('CreatedAt', 'created_at', None),
+ ('AcsManagementEndpoint', 'acs_management_endpoint', None),
+ ('ServiceBusEndpoint', 'servicebus_endpoint', None),
+ ('ConnectionString', 'connection_string', None),
+ ('SubscriptionId', 'subscription_id', None),
+ ('Enabled', 'enabled', _parse_bool),
+ )
+
+ for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'NamespaceDescription'):
+ for xml_name, field_name, conversion_func in mappings:
+ node_value = _get_first_child_node_value(desc, xml_name)
+ if node_value is not None:
+ if conversion_func is not None:
+ node_value = conversion_func(node_value)
+ setattr(namespace, field_name, node_value)
+
+ return namespace
+
+ @staticmethod
+ def xml_to_region(xmlstr):
+ '''Converts xml response to service bus region
+
+ The xml format for region:
+ <entry>
+   <id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>
+   <title type="text"></title>
+   <updated>2013-04-10T18:25:29Z</updated>
+   <content type="application/xml">
+     <RegionCodeDescription>
+       <Code>East Asia</Code>
+       <FullName>East Asia</FullName>
+     </RegionCodeDescription>
+   </content>
+ </entry>
+ '''
+ xmldoc = minidom.parseString(xmlstr)
+ region = ServiceBusRegion()
+
+ for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'RegionCodeDescription'):
+ node_value = _get_first_child_node_value(desc, 'Code')
+ if node_value is not None:
+ region.code = node_value
+ node_value = _get_first_child_node_value(desc, 'FullName')
+ if node_value is not None:
+ region.fullname = node_value
+
+ return region
+
+ @staticmethod
+ def xml_to_namespace_availability(xmlstr):
+ '''Converts xml response to service bus namespace availability
+
+ The xml format:
+ <entry>
+   <id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id>
+   <title type="text"></title>
+   <updated>2013-04-16T03:03:37Z</updated>
+   <content type="application/xml">
+     <NamespaceAvailability>
+       <Result>false</Result>
+     </NamespaceAvailability>
+   </content>
+ </entry>
+ '''
+ xmldoc = minidom.parseString(xmlstr)
+ availability = AvailabilityResponse()
+
+ for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'NamespaceAvailability'):
+ node_value = _get_first_child_node_value(desc, 'Result')
+ if node_value is not None:
+ availability.result = _parse_bool(node_value)
+
+ return availability
+
from azure.servicemanagement.servicemanagementservice import ServiceManagementService
+from azure.servicemanagement.servicebusmanagementservice import ServiceBusManagementService
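
Usage sketch (placeholder values) for the updated SSH key classes above; finger_print was renamed to fingerprint, both classes now take constructor arguments, and they serialize to the corrected 'Fingerprint' XML tag:

    from azure.servicemanagement import PublicKey, KeyPair

    ssh_key = PublicKey(fingerprint='<certificate-thumbprint>',
                        path='/home/azureuser/.ssh/authorized_keys')
    key_pair = KeyPair(fingerprint='<certificate-thumbprint>',
                       path='/home/azureuser/.ssh/id_rsa')
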
diff --git a/src/azure/servicemanagement/servicebusmanagementservice.py b/src/azure/servicemanagement/servicebusmanagementservice.py
new file mode 100644
index 000000000000..c25bb7e2c2a2
--- /dev/null
+++ b/src/azure/servicemanagement/servicebusmanagementservice.py
@@ -0,0 +1,86 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+from azure import (MANAGEMENT_HOST,
+ _convert_response_to_feeds,
+ _str,
+ _validate_not_none,
+ )
+from azure.servicemanagement import _ServiceBusManagementXmlSerializer
+from azure.servicemanagement.servicemanagementclient import _ServiceManagementClient
+
+class ServiceBusManagementService(_ServiceManagementClient):
+ def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST):
+ return super(ServiceBusManagementService, self).__init__(subscription_id, cert_file, host)
+
+ #--Operations for service bus ----------------------------------------
+ def get_regions(self):
+ '''
+ Get list of available service bus regions.
+ '''
+ response = self._perform_get(self._get_path('services/serviceBus/Regions/', None),
+ None)
+ return _convert_response_to_feeds(response, _ServiceBusManagementXmlSerializer.xml_to_region)
+
+ def list_namespaces(self):
+ '''
+ List the service bus namespaces defined on the account.
+ '''
+ response = self._perform_get(self._get_path('services/serviceBus/Namespaces/', None),
+ None)
+ return _convert_response_to_feeds(response, _ServiceBusManagementXmlSerializer.xml_to_namespace)
+
+ def get_namespace(self, name):
+ '''
+ Get details about a specific namespace.
+
+ name: Name of the service bus namespace.
+ '''
+ response = self._perform_get(self._get_path('services/serviceBus/Namespaces', name),
+ None)
+ return _ServiceBusManagementXmlSerializer.xml_to_namespace(response.body)
+
+ def create_namespace(self, name, region):
+ '''
+ Create a new service bus namespace.
+
+ name: Name of the service bus namespace to create.
+ region: Region to create the namespace in.
+ '''
+ _validate_not_none('name', name)
+ return self._perform_put(self._get_path('services/serviceBus/Namespaces', name),
+ _ServiceBusManagementXmlSerializer.namespace_to_xml(name, region))
+
+ def delete_namespace(self, name):
+ '''
+ Delete a service bus namespace.
+
+ name: Name of the service bus namespace to delete.
+ '''
+ _validate_not_none('name', name)
+ return self._perform_delete(self._get_path('services/serviceBus/Namespaces', name),
+ None)
+
+ def check_namespace_availability(self, name):
+ '''
+ Checks to see if the specified service bus namespace is available, or
+ if it has already been taken.
+
+ name: Name of the service bus namespace to validate.
+ '''
+ _validate_not_none('name', name)
+
+ response = self._perform_get(self._get_path('services/serviceBus/CheckNamespaceAvailability', None) + '/?namespace=' + _str(name),
+ None)
+ return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(response.body)
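
Usage sketch (placeholder subscription id, certificate path and namespace) for the new service bus management API:

    from azure.servicemanagement import ServiceBusManagementService

    sbms = ServiceBusManagementService('<subscription-id>',
                                       '<path-to-management-certificate.pem>')

    if sbms.check_namespace_availability('mynamespace').result:
        sbms.create_namespace('mynamespace', 'West US')

    for ns in sbms.list_namespaces():
        print ns.name, ns.region, ns.status

    sbms.delete_namespace('mynamespace')
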
diff --git a/src/azure/servicemanagement/servicemanagementclient.py b/src/azure/servicemanagement/servicemanagementclient.py
new file mode 100644
index 000000000000..c4ec7b4fbc45
--- /dev/null
+++ b/src/azure/servicemanagement/servicemanagementclient.py
@@ -0,0 +1,147 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+import os
+
+from azure import (WindowsAzureError,
+ MANAGEMENT_HOST,
+ _get_request_body,
+ _parse_response,
+ _str,
+ _update_request_uri_query,
+ )
+from azure.http import (HTTPError,
+ HTTPRequest,
+ )
+from azure.http.httpclient import _HTTPClient
+from azure.servicemanagement import (AZURE_MANAGEMENT_CERTFILE,
+ AZURE_MANAGEMENT_SUBSCRIPTIONID,
+ _management_error_handler,
+ _parse_response_for_async_op,
+ _update_management_header,
+ )
+
+class _ServiceManagementClient(object):
+ def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST):
+ self.requestid = None
+ self.subscription_id = subscription_id
+ self.cert_file = cert_file
+ self.host = host
+
+ if not self.cert_file:
+ if os.environ.has_key(AZURE_MANAGEMENT_CERTFILE):
+ self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
+
+ if not self.subscription_id:
+ if os.environ.has_key(AZURE_MANAGEMENT_SUBSCRIPTIONID):
+ self.subscription_id = os.environ[AZURE_MANAGEMENT_SUBSCRIPTIONID]
+
+ if not self.cert_file or not self.subscription_id:
+ raise WindowsAzureError('You need to provide subscription id and certificate file')
+
+ self._httpclient = _HTTPClient(service_instance=self, cert_file=self.cert_file)
+ self._filter = self._httpclient.perform_request
+
+ def with_filter(self, filter):
+ '''Returns a new service which will process requests with the
+ specified filter. Filtering operations can include logging, automatic
+ retrying, etc... The filter is a lambda which receives the HTTPRequest
+ and another lambda. The filter can perform any pre-processing on the
+ request, pass it off to the next lambda, and then perform any post-processing
+ on the response.'''
+ res = type(self)(self.subscription_id, self.cert_file, self.host)
+ old_filter = self._filter
+ def new_filter(request):
+ return filter(request, old_filter)
+
+ res._filter = new_filter
+ return res
+
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for HTTP CONNECT tunnelling.'''
+ self._httpclient.set_proxy(host, port)
+
+ #--Helper functions --------------------------------------------------
+ def _perform_request(self, request):
+ try:
+ resp = self._filter(request)
+ except HTTPError as e:
+ return _management_error_handler(e)
+
+ return resp
+
+ def _perform_get(self, path, response_type):
+ request = HTTPRequest()
+ request.method = 'GET'
+ request.host = self.host
+ request.path = path
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if response_type is not None:
+ return _parse_response(response, response_type)
+
+ return response
+
+ def _perform_put(self, path, body, async=False):
+ request = HTTPRequest()
+ request.method = 'PUT'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_post(self, path, body, response_type=None, async=False):
+ request = HTTPRequest()
+ request.method = 'POST'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if response_type is not None:
+ return _parse_response(response, response_type)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_delete(self, path, async=False):
+ request = HTTPRequest()
+ request.method = 'DELETE'
+ request.host = self.host
+ request.path = path
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _get_path(self, resource, name):
+ path = '/' + self.subscription_id + '/' + resource
+ if name is not None:
+ path += '/' + _str(name)
+ return path
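
Sketch of a request filter, as described by the with_filter docstring above (placeholder credentials; the filter simply logs each request before passing it along):

    from azure.servicemanagement import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>',
                                   '<path-to-management-certificate.pem>')

    def log_filter(request, next_filter):
        # pre-processing: inspect the outgoing request
        print request.method, request.host + request.path
        # hand the request to the next filter (ultimately the HTTP client)
        response = next_filter(request)
        # post-processing on the response could be done here
        return response

    logged_sms = sms.with_filter(log_filter)
    logged_sms.list_storage_accounts()
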
diff --git a/src/azure/servicemanagement/servicemanagementservice.py b/src/azure/servicemanagement/servicemanagementservice.py
index 0a7bab088415..aa04ae9625eb 100644
--- a/src/azure/servicemanagement/servicemanagementservice.py
+++ b/src/azure/servicemanagement/servicemanagementservice.py
@@ -12,62 +12,44 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import base64
-import os
-import urllib2
-
-from azure.http.httpclient import _HTTPClient
-from azure.http import HTTPError
-from azure.servicemanagement import *
-from azure.servicemanagement import (_update_management_header,
+from azure import (WindowsAzureError,
+ MANAGEMENT_HOST,
+ _str,
+ _validate_not_none,
+ )
+from azure.servicemanagement import (AffinityGroups,
+ AffinityGroup,
+ AvailabilityResponse,
+ Certificate,
+ Certificates,
+ DataVirtualHardDisk,
+ Deployment,
+ Disk,
+ Disks,
+ Locations,
+ Operation,
+ HostedService,
+ HostedServices,
+ Images,
+ OperatingSystems,
+ OperatingSystemFamilies,
+ OSImage,
+ PersistentVMRole,
+ StorageService,
+ StorageServices,
+ Subscription,
+ SubscriptionCertificate,
+ SubscriptionCertificates,
_management_error_handler,
+ _update_management_header,
_parse_response_for_async_op,
- _XmlSerializer)
-from azure.http import HTTPRequest
-from azure import (_validate_not_none, _str,
- _get_request_body, _update_request_uri_query,
- WindowsAzureError, _parse_response,
- MANAGEMENT_HOST)
-
-class ServiceManagementService:
+ _XmlSerializer,
+ )
+from azure.servicemanagement.servicemanagementclient import _ServiceManagementClient
+
+class ServiceManagementService(_ServiceManagementClient):
def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST):
- self.requestid = None
- self.subscription_id = subscription_id
- self.cert_file = cert_file
- self.host = host
-
- if not self.cert_file:
- if os.environ.has_key(AZURE_MANAGEMENT_CERTFILE):
- self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
-
- if not self.subscription_id:
- if os.environ.has_key(AZURE_MANAGEMENT_SUBSCRIPTIONID):
- self.subscription_id = os.environ[AZURE_MANAGEMENT_SUBSCRIPTIONID]
-
- if not self.cert_file or not self.subscription_id:
- raise WindowsAzureError('You need to provide subscription id and certificate file')
-
- self._httpclient = _HTTPClient(service_instance=self, cert_file=self.cert_file)
- self._filter = self._httpclient.perform_request
-
- def with_filter(self, filter):
- '''Returns a new service which will process requests with the
- specified filter. Filtering operations can include logging, automatic
- retrying, etc... The filter is a lambda which receives the HTTPRequest
- and another lambda. The filter can perform any pre-processing on the
- request, pass it off to the next lambda, and then perform any post-processing
- on the response.'''
- res = ServiceManagementService(self.subscription_id, self.cert_file)
- old_filter = self._filter
- def new_filter(request):
- return filter(request, old_filter)
-
- res._filter = new_filter
- return res
-
- def set_proxy(self, host, port):
- '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
- self._httpclient.set_proxy(host, port)
+ super(ServiceManagementService, self).__init__(subscription_id, cert_file, host)
#--Operations for storage accounts -----------------------------------
def list_storage_accounts(self):
@@ -89,7 +71,8 @@ def get_storage_account_properties(self, service_name):
def get_storage_account_keys(self, service_name):
'''
- Returns the primary and secondary access keys for the specified storage account.
+ Returns the primary and secondary access keys for the specified
+ storage account.
service_name: Name of the storage service account.
'''
@@ -99,10 +82,13 @@ def get_storage_account_keys(self, service_name):
def regenerate_storage_account_keys(self, service_name, key_type):
'''
- Regenerates the primary or secondary access key for the specified storage account.
+ Regenerates the primary or secondary access key for the specified
+ storage account.
service_name: Name of the storage service account.
- key_type: Specifies which key to regenerate. Valid values are: Primary, Secondary
+ key_type:
+ Specifies which key to regenerate. Valid values are:
+ Primary, Secondary
'''
_validate_not_none('service_name', service_name)
_validate_not_none('key_type', key_type)
@@ -114,37 +100,39 @@ def create_storage_account(self, service_name, description, label, affinity_grou
'''
Creates a new storage account in Windows Azure.
- service_name: A name for the storage account that is unique within
- Windows Azure. Storage account names must be between 3
- and 24 characters in length and use numbers and
- lower-case letters only.
- description: A description for the storage account. The description
- may be up to 1024 characters in length.
- label: A name for the storage account specified as a base64-encoded
- string. The name may be up to 100 characters in length. The
- name can be used identify the storage account for your tracking
- purposes.
- affinity_group: The name of an existing affinity group in the
- specified subscription. You can specify either a
- location or affinity_group, but not both.
- location: The location where the storage account is created. You can
- specify either a location or affinity_group, but not both.
- geo_replication_enabled: Specifies whether the storage account is
- created with the geo-replication enabled. If
- the element is not included in the request
- body, the default value is true. If set to
- true, the data in the storage account is
- replicated across more than one geographic
- location so as to enable resilience in the
- face of catastrophic service loss.
- extended_properties: Dictionary containing name/value pairs of storage
- account properties. You can have a maximum of 50
- extended property name/value pairs. The maximum
- length of the Name element is 64 characters, only
- alphanumeric characters and underscores are valid
- in the Name, and the name must start with a
- letter. The value has a maximum length of 255
- characters.
+ service_name:
+ A name for the storage account that is unique within Windows Azure.
+ Storage account names must be between 3 and 24 characters in length
+ and use numbers and lower-case letters only.
+ description:
+ A description for the storage account. The description may be up
+ to 1024 characters in length.
+ label:
+ A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The name
+ can be used to identify the storage account for your tracking
+ purposes.
+ affinity_group:
+ The name of an existing affinity group in the specified
+ subscription. You can specify either a location or affinity_group,
+ but not both.
+ location:
+ The location where the storage account is created. You can specify
+ either a location or affinity_group, but not both.
+ geo_replication_enabled:
+ Specifies whether the storage account is created with the
+ geo-replication enabled. If the element is not included in the
+ request body, the default value is true. If set to true, the data
+ in the storage account is replicated across more than one
+ geographic location so as to enable resilience in the face of
+ catastrophic service loss.
+ extended_properties:
+ Dictionary containing name/value pairs of storage account
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('description', description)
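
A usage sketch for the call documented above (illustrative only; the account name, description and certificate path are placeholders, and the label is passed base64-encoded as the docstring requires):

    import base64
    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    # location and affinity_group are mutually exclusive; only location is used here
    sms.create_storage_account(
        service_name='mystorageacct01',              # 3-24 chars, lower-case letters and digits
        description='Storage for test workloads',
        label=base64.b64encode('mystorageacct01'),
        location='West US',
        geo_replication_enabled=True)
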
@@ -163,28 +151,27 @@ def update_storage_account(self, service_name, description=None, label=None, geo
geo-replication status for a storage account in Windows Azure.
service_name: Name of the storage service account.
- description: A description for the storage account. The description
- may be up to 1024 characters in length.
- label: A name for the storage account specified as a base64-encoded
- string. The name may be up to 100 characters in length. The
- name can be used identify the storage account for your tracking
- purposes.
- geo_replication_enabled: Specifies whether the storage account is
- created with the geo-replication enabled. If
- the element is not included in the request
- body, the default value is true. If set to
- true, the data in the storage account is
- replicated across more than one geographic
- location so as to enable resilience in the
- face of catastrophic service loss.
- extended_properties: Dictionary containing name/value pairs of storage
- account properties. You can have a maximum of 50
- extended property name/value pairs. The maximum
- length of the Name element is 64 characters, only
- alphanumeric characters and underscores are valid
- in the Name, and the name must start with a
- letter. The value has a maximum length of 255
- characters.
+ description:
+ A description for the storage account. The description may be up
+ to 1024 characters in length.
+ label:
+ A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The name
+ can be used to identify the storage account for your tracking purposes.
+ geo_replication_enabled:
+ Specifies whether the storage account is created with the
+ geo-replication enabled. If the element is not included in the
+ request body, the default value is true. If set to true, the data
+ in the storage account is replicated across more than one
+ geographic location so as to enable resilience in the face of
+ catastrophic service loss.
+ extended_properties:
+ Dictionary containing name/value pairs of storage account
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
return self._perform_put(self._get_storage_service_path(service_name),
@@ -228,9 +215,9 @@ def get_hosted_service_properties(self, service_name, embed_detail=False):
service's deployments.
service_name: Name of the hosted service.
- embed_detail: When True, the management service returns properties for
- all deployments of the service, as well as for the
- service itself.
+ embed_detail:
+ When True, the management service returns properties for all
+ deployments of the service, as well as for the service itself.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('embed_detail', embed_detail)
@@ -241,30 +228,33 @@ def create_hosted_service(self, service_name, label, description=None, location=
'''
Creates a new hosted service in Windows Azure.
- service_name: A name for the hosted service that is unique within
- Windows Azure. This name is the DNS prefix name and can
- be used to access the hosted service.
- label: A name for the hosted service that is base-64 encoded. The name
- can be up to 100 characters in length. The name can be used
- identify the storage account for your tracking purposes.
- description: A description for the hosted service. The description can
- be up to 1024 characters in length.
- location: The location where the hosted service will be created. You
- can specify either a location or affinity_group, but not
- both.
- affinity_group: The name of an existing affinity group associated with
- this subscription. This name is a GUID and can be
- retrieved by examining the name element of the response
- body returned by list_affinity_groups. You can specify
- either a location or affinity_group, but not both.
- extended_properties: Dictionary containing name/value pairs of
- extended hosted service properties. You can have
- a maximum of 50 extended property name/value
- pairs. The maximum length of the Name element is
- 64 characters, only alphanumeric characters and
- underscores are valid in the Name, and the name
- must start with a letter. The value has a maximum
- length of 255 characters.
+ service_name:
+ A name for the hosted service that is unique within Windows Azure.
+ This name is the DNS prefix name and can be used to access the
+ hosted service.
+ label:
+ A name for the hosted service that is base-64 encoded. The name can
+ be up to 100 characters in length. The name can be used to
+ identify the hosted service for your tracking purposes.
+ description:
+ A description for the hosted service. The description can be up to
+ 1024 characters in length.
+ location:
+ The location where the hosted service will be created. You can
+ specify either a location or affinity_group, but not both.
+ affinity_group:
+ The name of an existing affinity group associated with this
+ subscription. This name is a GUID and can be retrieved by examining
+ the name element of the response body returned by
+ list_affinity_groups. You can specify either a location or
+ affinity_group, but not both.
+ extended_properties:
+ Dictionary containing name/value pairs of extended hosted service
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('label', label)
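
A hedged example of the call above; the service name doubles as the DNS prefix, and all values shown are placeholders:

    import base64
    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    sms.create_hosted_service(
        service_name='myhostedservice01',             # unique within Windows Azure, used as DNS prefix
        label=base64.b64encode('myhostedservice01'),
        description='Demo hosted service',
        location='West US')                           # or affinity_group=..., but not both
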
@@ -281,22 +271,23 @@ def update_hosted_service(self, service_name, label=None, description=None, exte
Windows Azure.
service_name: Name of the hosted service.
- label: A name for the hosted service that is base64-encoded. The name
- may be up to 100 characters in length. You must specify a value
- for either Label or Description, or for both. It is recommended
- that the label be unique within the subscription. The name can
- be used identify the hosted service for your tracking purposes.
- description: A description for the hosted service. The description may
- be up to 1024 characters in length. You must specify a
- value for either Label or Description, or for both.
- extended_properties: Dictionary containing name/value pairs of
- extended hosted service properties. You can have
- a maximum of 50 extended property name/value
- pairs. The maximum length of the Name element is
- 64 characters, only alphanumeric characters and
- underscores are valid in the Name, and the name
- must start with a letter. The value has a maximum
- length of 255 characters.
+ label:
+ A name for the hosted service that is base64-encoded. The name may
+ be up to 100 characters in length. You must specify a value for
+ either Label or Description, or for both. It is recommended that
+ the label be unique within the subscription. The name can be used
+ to identify the hosted service for your tracking purposes.
+ description:
+ A description for the hosted service. The description may be up to
+ 1024 characters in length. You must specify a value for either
+ Label or Description, or for both.
+ extended_properties:
+ Dictionary containing name/value pairs of extended hosted service
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
return self._perform_put(self._get_hosted_service_path(service_name),
@@ -317,8 +308,9 @@ def get_deployment_by_slot(self, service_name, deployment_slot):
a deployment.
service_name: Name of the hosted service.
- deployment_slot: The environment to which the hosted service is
- deployed. Valid values are: staging, production
+ deployment_slot:
+ The environment to which the hosted service is deployed. Valid
+ values are: staging, production
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
@@ -344,42 +336,43 @@ def create_deployment(self, service_name, deployment_slot, name, package_url, la
or production.
service_name: Name of the hosted service.
- deployment_slot: The environment to which the hosted service is
- deployed. Valid values are: staging, production
- name: The name for the deployment. The deployment name must be unique
- among other deployments for the hosted service.
- package_url: A URL that refers to the location of the service package
- in the Blob service. The service package can be located
- either in a storage account beneath the same subscription
- or a Shared Access Signature (SAS) URI from any storage
- account.
- label: A name for the hosted service that is base-64 encoded. The name
- can be up to 100 characters in length. It is recommended that
- the label be unique within the subscription. The name can be
- used identify the hosted service for your tracking purposes.
- configuration: The base-64 encoded service configuration file for the
- deployment.
- start_deployment: Indicates whether to start the deployment
- immediately after it is created. If false, the
- service model is still deployed to the virtual
- machines but the code is not run immediately.
- Instead, the service is Suspended until you call
- Update Deployment Status and set the status to
- Running, at which time the service will be started.
- A deployed service still incurs charges, even if it
- is suspended.
- treat_warnings_as_error: Indicates whether to treat package validation
- warnings as errors. If set to true, the
- Created Deployment operation fails if there
- are validation warnings on the service package.
- extended_properties: Dictionary containing name/value pairs of
- extended hosted service properties. You can have
- a maximum of 50 extended property name/value
- pairs. The maximum length of the Name element is
- 64 characters, only alphanumeric characters and
- underscores are valid in the Name, and the name
- must start with a letter. The value has a maximum
- length of 255 characters.
+ deployment_slot:
+ The environment to which the hosted service is deployed. Valid
+ values are: staging, production
+ name:
+ The name for the deployment. The deployment name must be unique
+ among other deployments for the hosted service.
+ package_url:
+ A URL that refers to the location of the service package in the
+ Blob service. The service package can be located either in a
+ storage account beneath the same subscription or a Shared Access
+ Signature (SAS) URI from any storage account.
+ label:
+ A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that the
+ label be unique within the subscription. The name can be used
+ to identify the hosted service for your tracking purposes.
+ configuration:
+ The base-64 encoded service configuration file for the deployment.
+ start_deployment:
+ Indicates whether to start the deployment immediately after it is
+ created. If false, the service model is still deployed to the
+ virtual machines but the code is not run immediately. Instead, the
+ service is Suspended until you call Update Deployment Status and
+ set the status to Running, at which time the service will be
+ started. A deployed service still incurs charges, even if it is
+ suspended.
+ treat_warnings_as_error:
+ Indicates whether to treat package validation warnings as errors.
+ If set to true, the Create Deployment operation fails if there
+ are validation warnings on the service package.
+ extended_properties:
+ Dictionary containing name/value pairs of extended hosted service
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
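
A sketch of the deployment call above, assuming a package already uploaded to blob storage and a local .cscfg file; every name and URL here is a placeholder, and keyword names follow the docstring:

    import base64
    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    # the docstring expects the service configuration as a base-64 encoded string
    configuration = base64.b64encode(open('ServiceConfiguration.cscfg', 'rb').read())
    sms.create_deployment(
        'myhostedservice01',                          # service_name
        'staging',                                    # deployment_slot: staging or production
        'mydeployment01',                             # deployment name, unique within the service
        'http://example.blob.core.windows.net/packages/mypackage.cspkg',
        label=base64.b64encode('mydeployment01'),
        configuration=configuration,
        start_deployment=True)
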
@@ -428,25 +421,24 @@ def change_deployment_configuration(self, service_name, deployment_name, configu
service_name: Name of the hosted service.
deployment_name: The name of the deployment.
- configuration: The base-64 encoded service configuration file for the
- deployment.
- treat_warnings_as_error: Indicates whether to treat package validation
- warnings as errors. If set to true, the
- Created Deployment operation fails if there
- are validation warnings on the service
- package.
- mode: If set to Manual, WalkUpgradeDomain must be called to apply the
- update. If set to Auto, the Windows Azure platform will
- automatically apply the update To each upgrade domain for the
- service. Possible values are: Auto, Manual
- extended_properties: Dictionary containing name/value pairs of
- extended hosted service properties. You can have
- a maximum of 50 extended property name/value
- pairs. The maximum length of the Name element is
- 64 characters, only alphanumeric characters and
- underscores are valid in the Name, and the name
- must start with a letter. The value has a maximum
- length of 255 characters.
+ configuration:
+ The base-64 encoded service configuration file for the deployment.
+ treat_warnings_as_error:
+ Indicates whether to treat package validation warnings as errors.
+ If set to true, the Create Deployment operation fails if there
+ are validation warnings on the service package.
+ mode:
+ If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+ automatically apply the update to each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ extended_properties:
+ Dictionary containing name/value pairs of extended hosted service
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -461,8 +453,9 @@ def update_deployment_status(self, service_name, deployment_name, status):
service_name: Name of the hosted service.
deployment_name: The name of the deployment.
- status: The change to initiate to the deployment status. Possible
- values include: Running, Suspended
+ status:
+ The change to initiate to the deployment status. Possible values
+ include: Running, Suspended
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -477,34 +470,36 @@ def upgrade_deployment(self, service_name, deployment_name, mode, package_url, c
service_name: Name of the hosted service.
deployment_name: The name of the deployment.
- mode: If set to Manual, WalkUpgradeDomain must be called to apply the
- update. If set to Auto, the Windows Azure platform will
- automatically apply the update To each upgrade domain for the
- service. Possible values are: Auto, Manual
- package_url: A URL that refers to the location of the service package
- in the Blob service. The service package can be located
- either in a storage account beneath the same subscription
- or a Shared Access Signature (SAS) URI from any storage
- account.
- configuration: The base-64 encoded service configuration file for the
- deployment.
- label: A name for the hosted service that is base-64 encoded. The name
- can be up to 100 characters in length. It is recommended that
- the label be unique within the subscription. The name can be
- used identify the hosted service for your tracking purposes.
- force: Specifies whether the rollback should proceed even when it will
- cause local data to be lost from some role instances. True if
- the rollback should proceed; otherwise false if the rollback
- should fail.
+ mode:
+ If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+ automatically apply the update to each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ package_url:
+ A URL that refers to the location of the service package in the
+ Blob service. The service package can be located either in a
+ storage account beneath the same subscription or a Shared Access
+ Signature (SAS) URI from any storage account.
+ configuration:
+ The base-64 encoded service configuration file for the deployment.
+ label:
+ A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that the
+ label be unique within the subscription. The name can be used
+ to identify the hosted service for your tracking purposes.
+ force:
+ Specifies whether the rollback should proceed even when it will
+ cause local data to be lost from some role instances. True if the
+ rollback should proceed; otherwise false if the rollback should
+ fail.
role_to_upgrade: The name of the specific role to upgrade.
- extended_properties: Dictionary containing name/value pairs of
- extended hosted service properties. You can have
- a maximum of 50 extended property name/value
- pairs. The maximum length of the Name element is
- 64 characters, only alphanumeric characters and
- underscores are valid in the Name, and the name
- must start with a letter. The value has a maximum
- length of 255 characters.
+ extended_properties:
+ Dictionary containing name/value pairs of extended hosted service
+ properties. You can have a maximum of 50 extended property
+ name/value pairs. The maximum length of the Name element is 64
+ characters, only alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a letter. The value has
+ a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -524,10 +519,10 @@ def walk_upgrade_domain(self, service_name, deployment_name, upgrade_domain):
service_name: Name of the hosted service.
deployment_name: The name of the deployment.
- upgrade_domain: An integer value that identifies the upgrade domain
- to walk. Upgrade domains are identified with a
- zero-based index: the first upgrade domain has an ID
- of 0, the second has an ID of 1, and so on.
+ upgrade_domain:
+ An integer value that identifies the upgrade domain to walk.
+ Upgrade domains are identified with a zero-based index: the first
+ upgrade domain has an ID of 0, the second has an ID of 1, and so on.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -544,14 +539,16 @@ def rollback_update_or_upgrade(self, service_name, deployment_name, mode, force)
service_name: Name of the hosted service.
deployment_name: The name of the deployment.
- mode: Specifies whether the rollback should proceed automatically.
+ mode:
+ Specifies whether the rollback should proceed automatically.
auto - The rollback proceeds without further user input.
manual - You must call the Walk Upgrade Domain operation to
apply the rollback to each upgrade domain.
- force: Specifies whether the rollback should proceed even when it will
- cause local data to be lost from some role instances. True if
- the rollback should proceed; otherwise false if the rollback
- should fail.
+ force:
+ Specifies whether the rollback should proceed even when it will
+ cause local data to be lost from some role instances. True if the
+ rollback should proceed; otherwise false if the rollback should
+ fail.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -635,8 +632,8 @@ def add_service_certificate(self, service_name, data, certificate_format, passwo
service_name: Name of the hosted service.
data: The base-64 encoded form of the pfx file.
- certificate_format: The service certificate format. The only supported
- value is pfx.
+ certificate_format:
+ The service certificate format. The only supported value is pfx.
password: The certificate password.
'''
_validate_not_none('service_name', service_name)
@@ -696,10 +693,10 @@ def add_management_certificate(self, public_key, thumbprint, data):
attempting to connect to resources associated with your Windows Azure
subscription.
- public_key: A base64 representation of the management certificate
- public key.
- thumbprint: The thumb print that uniquely identifies the management
- certificate.
+ public_key:
+ A base64 representation of the management certificate public key.
+ thumbprint:
+ The thumbprint that uniquely identifies the management certificate.
 data: The certificate's raw data in base-64 encoded .cer format.
'''
_validate_not_none('public_key', public_key)
@@ -716,7 +713,8 @@ def delete_management_certificate(self, thumbprint):
attempting to connect to resources associated with your Windows Azure
subscription.
- thumbprint: The thumb print that uniquely identifies the management certificate.
+ thumbprint:
+ The thumbprint that uniquely identifies the management certificate.
'''
_validate_not_none('thumbprint', thumbprint)
return self._perform_delete('/' + self.subscription_id + '/certificates/' + _str(thumbprint))
@@ -745,13 +743,15 @@ def create_affinity_group(self, name, label, location, description=None):
Creates a new affinity group for the specified subscription.
name: A name for the affinity group that is unique to the subscription.
- label: A base-64 encoded name for the affinity group. The name can be
- up to 100 characters in length.
- location: The data center location where the affinity group will be
- created. To list available locations, use the list_location
- function.
- description: A description for the affinity group. The description can
- be up to 1024 characters in length.
+ label:
+ A base-64 encoded name for the affinity group. The name can be up
+ to 100 characters in length.
+ location:
+ The data center location where the affinity group will be created.
+ To list available locations, use the list_location function.
+ description:
+ A description for the affinity group. The description can be up to
+ 1024 characters in length.
'''
_validate_not_none('name', name)
_validate_not_none('label', label)
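
A short illustration of the affinity group call above; the group name and location are placeholders:

    import base64
    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    sms.create_affinity_group(
        'myaffinitygroup01',
        base64.b64encode('myaffinitygroup01'),       # label, base-64 encoded per the docstring
        'West US',                                   # a data center location
        description='Keeps related services in the same data center')
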
@@ -765,10 +765,12 @@ def update_affinity_group(self, affinity_group_name, label, description=None):
specified subscription.
affinity_group_name: The name of the affinity group.
- label: A name for the affinity specified as a base-64 encoded string.
- The label can be up to 100 characters in length.
- description: A description for the affinity group. The description can
- be up to 1024 characters in length.
+ label:
+ A name for the affinity group specified as a base-64 encoded string.
+ The label can be up to 100 characters in length.
+ description:
+ A description for the affinity group. The description can be up to
+ 1024 characters in length.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
_validate_not_none('label', label)
@@ -847,51 +849,57 @@ def get_role(self, service_name, deployment_name, role_name):
return self._perform_get(self._get_role_path(service_name, deployment_name, role_name),
PersistentVMRole)
- def create_virtual_machine_deployment(self, service_name, deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ def create_virtual_machine_deployment(self, service_name, deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole', virtual_network_name=None):
'''
Provisions a virtual machine based on the supplied configuration.
service_name: Name of the hosted service.
- deployment_name: The name for the deployment. The deployment name must
- be unique among other deployments for the hosted
- service.
- deployment_slot: The environment to which the hosted service is
- deployed. Valid values are: staging, production
- label: A name for the hosted service that is base-64 encoded. The name
- can be up to 100 characters in length. It is recommended that
- the label be unique within the subscription. The name can be
- used identify the hosted service for your tracking purposes.
+ deployment_name:
+ The name for the deployment. The deployment name must be unique
+ among other deployments for the hosted service.
+ deployment_slot:
+ The environment to which the hosted service is deployed. Valid
+ values are: staging, production
+ label:
+ A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that the
+ label be unique within the subscription. The name can be used
+ to identify the hosted service for your tracking purposes.
role_name: The name of the role.
- system_config: Contains the metadata required to provision a virtual
- machine from a Windows or Linux OS image. Use an
- instance of WindowsConfigurationSet or
- LinuxConfigurationSet.
- os_virtual_hard_disk: Contains the parameters Windows Azure uses to
- create the operating system disk for the virtual
- machine.
- network_config: Encapsulates the metadata required to create the
- virtual network configuration for a virtual machine.
- If you do not include a network configuration set you
- will not be able to access the VM through VIPs over
- the internet. If your virtual machine belongs to a
- virtual network you can not specify which subnet
- address space it resides under.
- availability_set_name: Specifies the name of an availability set to
- which to add the virtual machine. This value
- controls the virtual machine allocation in the
- Windows Azure environment. Virtual machines
- specified in the same availability set are
- allocated to different nodes to maximize
- availability.
- data_virtual_hard_disks: Contains the parameters Windows Azure uses to
- create a data disk for a virtual machine.
- role_size: The size of the virtual machine to allocate. The default
- value is Small. Possible values are: ExtraSmall, Small,
- Medium, Large, ExtraLarge. The specified value must be
- compatible with the disk selected in the OSVirtualHardDisk
- values.
- role_type: The type of the role for the virtual machine. The only
- supported value is PersistentVMRole.
+ system_config:
+ Contains the metadata required to provision a virtual machine from
+ a Windows or Linux OS image. Use an instance of
+ WindowsConfigurationSet or LinuxConfigurationSet.
+ os_virtual_hard_disk:
+ Contains the parameters Windows Azure uses to create the operating
+ system disk for the virtual machine.
+ network_config:
+ Encapsulates the metadata required to create the virtual network
+ configuration for a virtual machine. If you do not include a
+ network configuration set you will not be able to access the VM
+ through VIPs over the internet. If your virtual machine belongs to
+ a virtual network you can not specify which subnet address space
+ it resides under.
+ availability_set_name:
+ Specifies the name of an availability set to which to add the
+ virtual machine. This value controls the virtual machine
+ allocation in the Windows Azure environment. Virtual machines
+ specified in the same availability set are allocated to different
+ nodes to maximize availability.
+ data_virtual_hard_disks:
+ Contains the parameters Windows Azure uses to create a data disk
+ for a virtual machine.
+ role_size:
+ The size of the virtual machine to allocate. The default value is
+ Small. Possible values are: ExtraSmall, Small, Medium, Large,
+ ExtraLarge. The specified value must be compatible with the disk
+ selected in the OSVirtualHardDisk values.
+ role_type:
+ The type of the role for the virtual machine. The only supported
+ value is PersistentVMRole.
+ virtual_network_name:
+ Specifies the name of an existing virtual network to which the
+ deployment will belong.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -901,7 +909,7 @@ def create_virtual_machine_deployment(self, service_name, deployment_name, deplo
_validate_not_none('system_config', system_config)
_validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)
return self._perform_post(self._get_deployment_path_using_name(service_name),
- _XmlSerializer.virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ _XmlSerializer.virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size, virtual_network_name),
async=True)
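
Since this change adds the virtual_network_name parameter (issue #79), a hedged sketch of a call that uses it; the configuration-set and disk constructors shown, and their import locations, are assumptions, and every name, password and URL is a placeholder:

    import base64
    from azure.servicemanagement.servicemanagementservice import ServiceManagementService
    from azure.servicemanagement import LinuxConfigurationSet, OSVirtualHardDisk

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')

    # system_config: provisioning metadata for a Linux image (constructor args are assumed)
    system_config = LinuxConfigurationSet('myhostname', 'azureuser', 'P@ssw0rd!', True)

    # os_virtual_hard_disk: source image name plus the media link for the new OS disk (assumed args)
    os_hd = OSVirtualHardDisk('<source-image-name>',
                              'http://example.blob.core.windows.net/vhds/myvm.vhd')

    sms.create_virtual_machine_deployment(
        service_name='myhostedservice01',
        deployment_name='myvmdeployment01',
        deployment_slot='production',
        label=base64.b64encode('myvmdeployment01'),
        role_name='myvmrole01',
        system_config=system_config,
        os_virtual_hard_disk=os_hd,
        role_size='Small',
        virtual_network_name='myvnet01')   # new parameter introduced by this change
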
def add_role(self, service_name, deployment_name, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
@@ -911,36 +919,37 @@ def add_role(self, service_name, deployment_name, role_name, system_config, os_v
service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
- system_config: Contains the metadata required to provision a virtual
- machine from a Windows or Linux OS image. Use an
- instance of WindowsConfigurationSet or
- LinuxConfigurationSet.
- os_virtual_hard_disk: Contains the parameters Windows Azure uses to
- create the operating system disk for the virtual
- machine.
- network_config: Encapsulates the metadata required to create the
- virtual network configuration for a virtual machine.
- If you do not include a network configuration set you
- will not be able to access the VM through VIPs over
- the internet. If your virtual machine belongs to a
- virtual network you can not specify which subnet
- address space it resides under.
- availability_set_name: Specifies the name of an availability set to
- which to add the virtual machine. This value
- controls the virtual machine allocation in the
- Windows Azure environment. Virtual machines
- specified in the same availability set are
- allocated to different nodes to maximize
- availability.
- data_virtual_hard_disks: Contains the parameters Windows Azure uses to
- create a data disk for a virtual machine.
- role_size: The size of the virtual machine to allocate. The default
- value is Small. Possible values are: ExtraSmall, Small,
- Medium, Large, ExtraLarge. The specified value must be
- compatible with the disk selected in the OSVirtualHardDisk
- values.
- role_type: The type of the role for the virtual machine. The only
- supported value is PersistentVMRole.
+ system_config:
+ Contains the metadata required to provision a virtual machine from
+ a Windows or Linux OS image. Use an instance of
+ WindowsConfigurationSet or LinuxConfigurationSet.
+ os_virtual_hard_disk:
+ Contains the parameters Windows Azure uses to create the operating
+ system disk for the virtual machine.
+ network_config:
+ Encapsulates the metadata required to create the virtual network
+ configuration for a virtual machine. If you do not include a
+ network configuration set you will not be able to access the VM
+ through VIPs over the internet. If your virtual machine belongs to
+ a virtual network you can not specify which subnet address space
+ it resides under.
+ availability_set_name:
+ Specifies the name of an availability set to which to add the
+ virtual machine. This value controls the virtual machine allocation
+ in the Windows Azure environment. Virtual machines specified in the
+ same availability set are allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks:
+ Contains the parameters Windows Azure uses to create a data disk
+ for a virtual machine.
+ role_size:
+ The size of the virtual machine to allocate. The default value is
+ Small. Possible values are: ExtraSmall, Small, Medium, Large,
+ ExtraLarge. The specified value must be compatible with the disk
+ selected in the OSVirtualHardDisk values.
+ role_type:
+ The type of the role for the virtual machine. The only supported
+ value is PersistentVMRole.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -958,32 +967,33 @@ def update_role(self, service_name, deployment_name, role_name, os_virtual_hard_
service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
- os_virtual_hard_disk: Contains the parameters Windows Azure uses to
- create the operating system disk for the virtual
- machine.
- network_config: Encapsulates the metadata required to create the
- virtual network configuration for a virtual machine.
- If you do not include a network configuration set you
- will not be able to access the VM through VIPs over
- the internet. If your virtual machine belongs to a
- virtual network you can not specify which subnet
- address space it resides under.
- availability_set_name: Specifies the name of an availability set to
- which to add the virtual machine. This value
- controls the virtual machine allocation in the
- Windows Azure environment. Virtual machines
- specified in the same availability set are
- allocated to different nodes to maximize
- availability.
- data_virtual_hard_disks: Contains the parameters Windows Azure uses to
- create a data disk for a virtual machine.
- role_size: The size of the virtual machine to allocate. The default
- value is Small. Possible values are: ExtraSmall, Small,
- Medium, Large, ExtraLarge. The specified value must be
- compatible with the disk selected in the OSVirtualHardDisk
- values.
- role_type: The type of the role for the virtual machine. The only
- supported value is PersistentVMRole.
+ os_virtual_hard_disk:
+ Contains the parameters Windows Azure uses to create the operating
+ system disk for the virtual machine.
+ network_config:
+ Encapsulates the metadata required to create the virtual network
+ configuration for a virtual machine. If you do not include a
+ network configuration set you will not be able to access the VM
+ through VIPs over the internet. If your virtual machine belongs to
+ a virtual network you can not specify which subnet address space
+ it resides under.
+ availability_set_name:
+ Specifies the name of an availability set to which to add the
+ virtual machine. This value controls the virtual machine allocation
+ in the Windows Azure environment. Virtual machines specified in the
+ same availability set are allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks:
+ Contains the parameters Windows Azure uses to create a data disk
+ for a virtual machine.
+ role_size:
+ The size of the virtual machine to allocate. The default value is
+ Small. Possible values are: ExtraSmall, Small, Medium, Large,
+ ExtraLarge. The specified value must be compatible with the disk
+ selected in the OSVirtualHardDisk values.
+ role_type:
+ The type of the role for the virtual machine. The only supported
+ value is PersistentVMRole.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -1015,15 +1025,15 @@ def capture_role(self, service_name, deployment_name, role_name, post_capture_ac
service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
- post_capture_action: Specifies the action after capture operation
- completes. Possible values are: Delete,
- Reprovision.
- target_image_name: Specifies the image name of the captured virtual
- machine.
- target_image_label: Specifies the friendly name of the captured
- virtual machine.
- provisioning_configuration: Use an instance of WindowsConfigurationSet
- or LinuxConfigurationSet.
+ post_capture_action:
+ Specifies the action after capture operation completes. Possible
+ values are: Delete, Reprovision.
+ target_image_name:
+ Specifies the image name of the captured virtual machine.
+ target_image_label:
+ Specifies the friendly name of the captured virtual machine.
+ provisioning_configuration:
+ Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -1101,15 +1111,17 @@ def add_os_image(self, label, media_link, name, os):
subscription to the image repository.
label: Specifies the friendly name of the image.
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the image is located. The blob
- location must belong to a storage account in the
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- name: Specifies a name for the OS image that Windows Azure uses to
- identify the image when creating one or more virtual machines.
- os: The operating system type of the OS image. Possible values are:
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the image is located. The blob location must
+ belong to a storage account in the subscription specified by the
+ value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name:
+ Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more virtual machines.
+ os:
+ The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('label', label)
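
A minimal sketch of the image registration call above; the label, blob URL (taken from the docstring's example) and image name are placeholders:

    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    sms.add_os_image(
        'My captured Linux image',                                  # friendly label
        'http://example.blob.core.windows.net/disks/mydisk.vhd',    # media_link in the subscription's storage
        'my-linux-image-01',                                        # name used when creating VMs
        'Linux')                                                    # os: Linux or Windows
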
@@ -1125,18 +1137,21 @@ def update_os_image(self, image_name, label, media_link, name, os):
 Updates an OS image that is in your image repository.
image_name: The name of the image to update.
- label: Specifies the friendly name of the image to be updated. You
- cannot use this operation to update images provided by the
- Windows Azure platform.
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the image is located. The blob
- location must belong to a storage account in the
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- name: Specifies a name for the OS image that Windows Azure uses to
- identify the image when creating one or more VM Roles.
- os: The operating system type of the OS image. Possible values are:
+ label:
+ Specifies the friendly name of the image to be updated. You cannot
+ use this operation to update images provided by the Windows Azure
+ platform.
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the image is located. The blob location must
+ belong to a storage account in the subscription specified by the
+ value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name:
+ Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more VM Roles.
+ os:
+ The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('image_name', image_name)
@@ -1182,37 +1197,38 @@ def add_data_disk(self, service_name, deployment_name, role_name, lun, host_cach
service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
- lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
- specifies the slot in which the data drive appears when mounted
- for usage by the virtual machine. Valid LUN values are 0 through
- 15.
- host_caching: Specifies the platform caching behavior of data disk
- blob for read/write efficiency. The default vault is
- ReadOnly. Possible values are: None, ReadOnly, ReadWrite
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the disk is located. The blob
- location must belong to the storage account in the
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- disk_label: Specifies the description of the data disk. When you
- attach a disk, either by directly referencing a media
- using the MediaLink element or specifying the target disk
- size, you can use the DiskLabel element to customize the
- name property of the target data disk.
- disk_name: Specifies the name of the disk. Windows Azure uses the
- specified disk to create the data disk for the machine and
- populates this field with the disk name.
- logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
- to be attached to the role. The disk can be
- created as part of disk attach or create VM
- role call by specifying the value for this
- property. Windows Azure creates the empty
- disk based on size preference and attaches
- the newly created disk to the Role.
- source_media_link: Specifies the location of a blob in account storage
- which is mounted as a data disk when the virtual
- machine is created.
+ lun:
+ Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through 15.
+ host_caching:
+ Specifies the platform caching behavior of data disk blob for
+ read/write efficiency. The default value is ReadOnly. Possible
+ values are: None, ReadOnly, ReadWrite
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the disk is located. The blob location must
+ belong to the storage account in the subscription specified by the
+ value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ disk_label:
+ Specifies the description of the data disk. When you attach a disk,
+ either by directly referencing a media using the MediaLink element
+ or specifying the target disk size, you can use the DiskLabel
+ element to customize the name property of the target data disk.
+ disk_name:
+ Specifies the name of the disk. Windows Azure uses the specified
+ disk to create the data disk for the machine and populates this
+ field with the disk name.
+ logical_disk_size_in_gb:
+ Specifies the size, in GB, of an empty disk to be attached to the
+ role. The disk can be created as part of disk attach or create VM
+ role call by specifying the value for this property. Windows Azure
+ creates the empty disk based on size preference and attaches the
+ newly created disk to the Role.
+ source_media_link:
+ Specifies the location of a blob in account storage which is
+ mounted as a data disk when the virtual machine is created.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
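
A sketch of attaching an empty data disk with the call above; service, deployment and role names are placeholders, and keyword names follow the docstring:

    from azure.servicemanagement.servicemanagementservice import ServiceManagementService

    sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
    # attach a new, empty 16 GB data disk at LUN 0 with read-only host caching
    sms.add_data_disk(
        'myhostedservice01',
        'myvmdeployment01',
        'myvmrole01',
        0,                                   # lun: valid values are 0 through 15
        host_caching='ReadOnly',
        media_link='http://example.blob.core.windows.net/disks/mydatadisk.vhd',
        disk_label='data disk 0',
        logical_disk_size_in_gb=16)
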
@@ -1230,38 +1246,40 @@ def update_data_disk(self, service_name, deployment_name, role_name, lun, host_c
service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
- lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
- specifies the slot in which the data drive appears when mounted
- for usage by the virtual machine. Valid LUN values are 0 through
- 15.
- host_caching: Specifies the platform caching behavior of data disk
- blob for read/write efficiency. The default vault is
- ReadOnly. Possible values are: None, ReadOnly, ReadWrite
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the disk is located. The blob
- location must belong to the storage account in the
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The
- LUN specifies the slot in which the data drive appears
- when mounted for usage by the virtual machine. Valid LUN
- values are 0 through 15.
- disk_label: Specifies the description of the data disk. When you
- attach a disk, either by directly referencing a media
- using the MediaLink element or specifying the target disk
- size, you can use the DiskLabel element to customize the
- name property of the target data disk.
- disk_name: Specifies the name of the disk. Windows Azure uses the
- specified disk to create the data disk for the machine and
- populates this field with the disk name.
- logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
- to be attached to the role. The disk can be
- created as part of disk attach or create VM
- role call by specifying the value for this
- property. Windows Azure creates the empty
- disk based on size preference and attaches
- the newly created disk to the Role.
+ lun:
+ Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through
+ 15.
+ host_caching:
+ Specifies the platform caching behavior of data disk blob for
+ read/write efficiency. The default value is ReadOnly. Possible
+ values are: None, ReadOnly, ReadWrite
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the disk is located. The blob location must
+ belong to the storage account in the subscription specified by
+ the value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ updated_lun:
+ Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through 15.
+ disk_label:
+ Specifies the description of the data disk. When you attach a disk,
+ either by directly referencing a media using the MediaLink element
+ or specifying the target disk size, you can use the DiskLabel
+ element to customize the name property of the target data disk.
+ disk_name:
+ Specifies the name of the disk. Windows Azure uses the specified
+ disk to create the data disk for the machine and populates this
+ field with the disk name.
+ logical_disk_size_in_gb:
+ Specifies the size, in GB, of an empty disk to be attached to the
+ role. The disk can be created as part of disk attach or create VM
+ role call by specifying the value for this property. Windows Azure
+ creates the empty disk based on size preference and attaches the
+ newly created disk to the Role.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
@@ -1307,18 +1325,19 @@ def add_disk(self, has_operating_system, label, media_link, name, os):
Adds a disk to the user image repository. The disk can be an OS disk
or a data disk.
- has_operating_system: Specifies whether the disk contains an operation
- system. Only a disk with an operating system
- installed can be mounted as OS Drive.
+ has_operating_system:
+ Specifies whether the disk contains an operating system. Only a
+ disk with an operating system installed can be mounted as OS Drive.
label: Specifies the description of the disk.
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the disk is located. The blob
- location must belong to the storage account in the current
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- name: Specifies a name for the disk. Windows Azure uses the name to
- identify the disk when creating virtual machines from the disk.
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the disk is located. The blob location must
+ belong to the storage account in the current subscription specified
+ by the value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name:
+ Specifies a name for the disk. Windows Azure uses the name to
+ identify the disk when creating virtual machines from the disk.
os: The OS type of the disk. Possible values are: Linux, Windows
'''
_validate_not_none('has_operating_system', has_operating_system)
@@ -1334,18 +1353,19 @@ def update_disk(self, disk_name, has_operating_system, label, media_link, name,
Updates an existing disk in your image repository.
disk_name: The name of the disk to update.
- has_operating_system: Specifies whether the disk contains an operation
- system. Only a disk with an operating system
- installed can be mounted as OS Drive.
+ has_operating_system:
+ Specifies whether the disk contains an operating system. Only a
+ disk with an operating system installed can be mounted as OS Drive.
label: Specifies the description of the disk.
- media_link: Specifies the location of the blob in Windows Azure blob
- store where the media for the disk is located. The blob
- location must belong to the storage account in the current
- subscription specified by the value in
- the operation call. Example:
- http://example.blob.core.windows.net/disks/mydisk.vhd
- name: Specifies a name for the disk. Windows Azure uses the name to
- identify the disk when creating virtual machines from the disk.
+ media_link:
+ Specifies the location of the blob in Windows Azure blob store
+ where the media for the disk is located. The blob location must
+ belong to the storage account in the current subscription specified
+ by the value in the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name:
+ Specifies a name for the disk. Windows Azure uses the name to
+ identify the disk when creating virtual machines from the disk.
os: The OS type of the disk. Possible values are: Linux, Windows
'''
_validate_not_none('disk_name', disk_name)
@@ -1368,78 +1388,6 @@ def delete_disk(self, disk_name):
return self._perform_delete(self._get_disk_path(disk_name))
#--Helper functions --------------------------------------------------
- def _perform_request(self, request):
- try:
- resp = self._filter(request)
- except HTTPError as e:
- return _management_error_handler(e)
-
- return resp
-
- def _perform_get(self, path, response_type):
- request = HTTPRequest()
- request.method = 'GET'
- request.host = self.host
- request.path = path
- request.path, request.query = _update_request_uri_query(request)
- request.headers = _update_management_header(request)
- response = self._perform_request(request)
-
- return _parse_response(response, response_type)
-
- def _perform_put(self, path, body, async=False):
- request = HTTPRequest()
- request.method = 'PUT'
- request.host = self.host
- request.path = path
- request.body = _get_request_body(body)
- request.path, request.query = _update_request_uri_query(request)
- request.headers = _update_management_header(request)
- response = self._perform_request(request)
-
- if async:
- return _parse_response_for_async_op(response)
-
- return None
-
- def _perform_post(self, path, body, response_type=None, async=False):
- request = HTTPRequest()
- request.method = 'POST'
- request.host = self.host
- request.path = path
- request.body = _get_request_body(body)
- request.path, request.query = _update_request_uri_query(request)
- request.headers = _update_management_header(request)
- response = self._perform_request(request)
-
- if response_type is not None:
- return _parse_response(response, response_type)
-
- if async:
- return _parse_response_for_async_op(response)
-
- return None
-
- def _perform_delete(self, path, async=False):
- request = HTTPRequest()
- request.method = 'DELETE'
- request.host = self.host
- request.path = path
- request.path, request.query = _update_request_uri_query(request)
- request.headers = _update_management_header(request)
- response = self._perform_request(request)
-
- if async:
- return _parse_response_for_async_op(response)
-
- return None
-
- def _get_path(self, resource, name):
- path = '/' + self.subscription_id + '/' + resource
- if name is not None:
- path += '/' + _str(name)
- return path
-
def _get_storage_service_path(self, service_name=None):
return self._get_path('services/storageservices', service_name)
diff --git a/src/azure/storage/__init__.py b/src/azure/storage/__init__.py
index c022b52b06da..fc95ae88c9da 100644
--- a/src/azure/storage/__init__.py
+++ b/src/azure/storage/__init__.py
@@ -12,27 +12,29 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import datetime
import base64
import hashlib
import hmac
-import urllib2
-from xml.dom import minidom
import types
-from datetime import datetime
-from azure import (_create_entry, METADATA_NS, _parse_response_for_dict,
- _get_entry_properties, WindowsAzureError,
- _get_child_nodes, _get_child_nodesNS,
- WindowsAzureConflictError, _general_error_handler,
- WindowsAzureMissingResourceError, _list_of,
- DEV_TABLE_HOST, TABLE_SERVICE_HOST_BASE, DEV_BLOB_HOST,
- BLOB_SERVICE_HOST_BASE, DEV_QUEUE_HOST,
- QUEUE_SERVICE_HOST_BASE, WindowsAzureData,
- _get_children_from_path, xml_escape,
- _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY)
-import azure
-
+from datetime import datetime
+from xml.dom import minidom
+from azure import (WindowsAzureData,
+ WindowsAzureError,
+ METADATA_NS,
+ xml_escape,
+ _create_entry,
+ _fill_data_minidom,
+ _fill_instance_element,
+ _get_child_nodes,
+ _get_child_nodesNS,
+ _get_children_from_path,
+ _get_entry_properties,
+ _general_error_handler,
+ _list_of,
+ _parse_response_for_dict,
+ _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
+ )
#x-ms-version for storage service.
X_MS_VERSION = '2011-08-18'
@@ -156,6 +158,8 @@ class BlobEnumResults(EnumResultsBase):
def __init__(self):
EnumResultsBase.__init__(self)
self.blobs = _list_of(Blob)
+ self.prefixes = _list_of(BlobPrefix)
+ self.delimiter = ''
def __iter__(self):
return iter(self.blobs)
@@ -183,7 +187,6 @@ def __init__(self):
self.url = u''
self.properties = BlobProperties()
self.metadata = {}
- self.blob_prefix = BlobPrefix()
class BlobProperties(WindowsAzureData):
''' Blob Properties '''
@@ -308,6 +311,27 @@ class Table(WindowsAzureData):
''' Only for intellisense and telling the user the return type. '''
pass
+def _parse_blob_enum_results_list(response):
+ respbody = response.body
+ return_obj = BlobEnumResults()
+ doc = minidom.parseString(respbody)
+
+ for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
+ for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'):
+ return_obj.blobs.append(_fill_instance_element(child, Blob))
+
+ for child in _get_children_from_path(enum_results, 'Blobs', 'BlobPrefix'):
+ return_obj.prefixes.append(_fill_instance_element(child, BlobPrefix))
+
+ for name, value in vars(return_obj).iteritems():
+ if name == 'blobs' or name == 'prefixes':
+ continue
+ value = _fill_data_minidom(enum_results, name, value)
+ if value is not None:
+ setattr(return_obj, name, value)
+
+ return return_obj
+
def _update_storage_header(request):
''' add additional headers for storage request. '''
@@ -481,10 +505,12 @@ def _from_entity_int(value):
return int(value)
def _from_entity_datetime(value):
+ format = '%Y-%m-%dT%H:%M:%S'
+ if '.' in value:
+ format = format + '.%f'
if value.endswith('Z'):
- return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
- else:
- return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
+ format = format + 'Z'
+ return datetime.strptime(value, format)
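With the format string built up this way, both datetime shapes returned by the Table service parse, with or without fractional seconds, for example:

    _from_entity_datetime('2013-07-08T01:02:03Z')
    # -> datetime.datetime(2013, 7, 8, 1, 2, 3)
    _from_entity_datetime('2013-07-08T01:02:03.123456Z')
    # -> datetime.datetime(2013, 7, 8, 1, 2, 3, 123456)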
_ENTITY_TO_PYTHON_CONVERSIONS = {
'Edm.Int32': _from_entity_int,
@@ -508,7 +534,7 @@ def _from_entity_datetime(value):
unicode: _to_entity_str,
}
-def convert_entity_to_xml(source):
+def _convert_entity_to_xml(source):
''' Converts an entity object to xml to send.
The entity format is:
@@ -572,17 +598,17 @@ def convert_entity_to_xml(source):
xmlstr = _create_entry(entity_body)
return xmlstr
-def convert_table_to_xml(table_name):
+def _convert_table_to_xml(table_name):
'''
Create xml to send for a given table name. Since xml format for table is
the same as entity and the only difference is that table has only one
- property 'TableName', so we just call convert_entity_to_xml.
+ property 'TableName', so we just call _convert_entity_to_xml.
table_name: the name of the table
'''
- return convert_entity_to_xml({'TableName': table_name})
+ return _convert_entity_to_xml({'TableName': table_name})
-def convert_block_list_to_xml(block_id_list):
+def _convert_block_list_to_xml(block_id_list):
'''
Convert a block list to xml to send.
@@ -601,7 +627,7 @@ def _create_blob_result(response):
blob_properties = _parse_response_for_dict(response)
return BlobResult(response.body, blob_properties)
-def convert_response_to_block_list(response):
+def _convert_response_to_block_list(response):
'''
Converts xml response to block list class.
'''
diff --git a/src/azure/storage/blobservice.py b/src/azure/storage/blobservice.py
index b25aa26c8642..7aa16dc4ba60 100644
--- a/src/azure/storage/blobservice.py
+++ b/src/azure/storage/blobservice.py
@@ -13,46 +13,76 @@
# limitations under the License.
#--------------------------------------------------------------------------
import base64
-import os
-import urllib2
-from azure.storage import *
+from azure import (WindowsAzureError,
+ BLOB_SERVICE_HOST_BASE,
+ DEV_BLOB_HOST,
+ _convert_class_to_xml,
+ _dont_fail_not_exist,
+ _dont_fail_on_exist,
+ _get_request_body,
+ _int_or_none,
+ _parse_enum_results_list,
+ _parse_response,
+ _parse_response_for_dict,
+ _parse_response_for_dict_filter,
+ _parse_response_for_dict_prefix,
+ _parse_simple_list,
+ _str,
+ _str_or_none,
+ _update_request_uri_query_local_storage,
+ _validate_not_none,
+ xml_escape,
+ )
+from azure.http import HTTPRequest
+from azure.storage import (Container,
+ ContainerEnumResults,
+ PageList,
+ PageRange,
+ SignedIdentifiers,
+ StorageServiceProperties,
+ _convert_block_list_to_xml,
+ _convert_response_to_block_list,
+ _create_blob_result,
+ _parse_blob_enum_results_list,
+ _update_storage_blob_header,
+ )
from azure.storage.storageclient import _StorageClient
-from azure.storage import (_update_storage_blob_header, _create_blob_result,
- convert_block_list_to_xml, convert_response_to_block_list)
-from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
-from azure import (_validate_not_none, Feed,
- _convert_response_to_feeds, _str, _str_or_none, _int_or_none,
- _get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
- WindowsAzureError, _parse_response, _convert_class_to_xml,
- _parse_response_for_dict, _parse_response_for_dict_prefix,
- _parse_response_for_dict_filter,
- _parse_enum_results_list, _update_request_uri_query_local_storage,
- _parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class BlobService(_StorageClient):
'''
This is the main class managing Blob resources.
- account_name: your storage account name, required for all operations.
- account_key: your storage account key, required for all operations.
'''
def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = BLOB_SERVICE_HOST_BASE, dev_host = DEV_BLOB_HOST):
+ '''
+ account_name: your storage account name, required for all operations.
+ account_key: your storage account key, required for all operations.
+ protocol: Optional. Protocol. Defaults to http.
+ host_base:
+ Optional. Live host base url. Defaults to Azure url. Override this
+ for on-premise.
+ dev_host: Optional. Dev host url. Defaults to localhost.
+ '''
return super(BlobService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
def list_containers(self, prefix=None, marker=None, maxresults=None, include=None):
'''
- The List Containers operation returns a list of the containers under the specified account.
+ The List Containers operation returns a list of the containers under
+ the specified account.
- prefix: Optional. Filters the results to return only containers whose names begin with
- the specified prefix.
- marker: Optional. A string value that identifies the portion of the list to be returned
- with the next list operation.
- maxresults: Optional. Specifies the maximum number of containers to return.
- include: Optional. Include this parameter to specify that the container's metadata be
- returned as part of the response body. set this parameter to string 'metadata' to
- get container's metadata.
+ prefix:
+ Optional. Filters the results to return only containers whose names
+ begin with the specified prefix.
+ marker:
+ Optional. A string value that identifies the portion of the list to
+ be returned with the next list operation.
+ maxresults:
+ Optional. Specifies the maximum number of containers to return.
+ include:
+ Optional. Include this parameter to specify that the container's
+ metadata be returned as part of the response body. Set this
+ parameter to string 'metadata' to get container's metadata.
'''
request = HTTPRequest()
request.method = 'GET'
@@ -72,13 +102,17 @@ def list_containers(self, prefix=None, marker=None, maxresults=None, include=Non
def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob_public_access=None, fail_on_exist=False):
'''
- Creates a new container under the specified account. If the container with the same name
- already exists, the operation fails.
+ Creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
- x_ms_meta_name_values: Optional. A dict with name_value pairs to associate with the
- container as metadata. Example:{'Category':'test'}
- x_ms_blob_public_access: Optional. Possible values include: container, blob.
- fail_on_exist: specify whether to throw an exception when the container exists.
+ container_name: Name of container to create.
+ x_ms_meta_name_values:
+ Optional. A dict with name_value pairs to associate with the
+ container as metadata. Example:{'Category':'test'}
+ x_ms_blob_public_access:
+ Optional. Possible values include: container, blob
+ fail_on_exist:
+ Specify whether to throw an exception when the container exists.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
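A minimal sketch of container creation, assuming valid storage credentials (account name and key are placeholders):

    from azure.storage.blobservice import BlobService

    blob_service = BlobService(account_name='myaccount', account_key='mykey')
    created = blob_service.create_container(
        'mycontainer',
        x_ms_meta_name_values={'Category': 'test'},
        fail_on_exist=False)  # with fail_on_exist=False an existing container does not raise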
@@ -104,7 +138,10 @@ def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob
def get_container_properties(self, container_name):
'''
- Returns all user-defined metadata and system properties for the specified container.
+ Returns all user-defined metadata and system properties for the
+ specified container.
+
+ container_name: Name of existing container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
@@ -119,8 +156,10 @@ def get_container_properties(self, container_name):
def get_container_metadata(self, container_name):
'''
- Returns all user-defined metadata for the specified container. The metadata will be
- in returned dictionary['x-ms-meta-(name)'].
+ Returns all user-defined metadata for the specified container. The
+ metadata will be in returned dictionary['x-ms-meta-(name)'].
+
+ container_name: Name of existing container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
@@ -131,13 +170,16 @@ def get_container_metadata(self, container_name):
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return _parse_response_for_dict(response)
+ return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
'''
- Sets one or more user-defined name-value pairs for the specified container.
+ Sets one or more user-defined name-value pairs for the specified
+ container.
- x_ms_meta_name_values: A dict containing name, value for metadata. Example: {'category':'test'}
+ container_name: Name of existing container.
+ x_ms_meta_name_values:
+ A dict containing name, value for metadata. Example: {'category':'test'}
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
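Continuing with the blob_service client sketched above, a metadata round trip then looks roughly like this; only the x-ms-meta headers come back after the filtering change above:

    blob_service.set_container_metadata('mycontainer', {'category': 'test'})
    metadata = blob_service.get_container_metadata('mycontainer')
    # metadata is now limited to the metadata headers,
    # e.g. {'x-ms-meta-category': 'test'}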
@@ -152,6 +194,8 @@ def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
def get_container_acl(self, container_name):
'''
Gets the permissions for the specified container.
+
+ container_name: Name of existing container.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
@@ -168,8 +212,10 @@ def set_container_acl(self, container_name, signed_identifiers=None, x_ms_blob_p
'''
Sets the permissions for the specified container.
- x_ms_blob_public_access: Optional. Possible values include 'container' and 'blob'.
+ container_name: Name of existing container.
signed_identifiers: SignedIdentifers instance
+ x_ms_blob_public_access:
+ Optional. Possible values include: container, blob
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
@@ -186,7 +232,9 @@ def delete_container(self, container_name, fail_not_exist=False):
'''
Marks the specified container for deletion.
- fail_not_exist: specify whether to throw an exception when the container doesn't exist.
+ container_name: Name of container to delete.
+ fail_not_exist:
+ Specify whether to throw an exception when the container doesn't exist.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
@@ -206,9 +254,51 @@ def delete_container(self, container_name, fail_not_exist=False):
self._perform_request(request)
return True
- def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None, include=None):
+ def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None, include=None, delimiter=None):
'''
Returns the list of blobs under the specified container.
+
+ container_name: Name of existing container.
+ prefix:
+ Optional. Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ marker:
+ Optional. A string value that identifies the portion of the list
+ to be returned with the next list operation. The operation returns
+ a marker value within the response body if the list returned was
+ not complete. The marker value may then be used in a subsequent
+ call to request the next set of list items. The marker value is
+ opaque to the client.
+ maxresults:
+ Optional. Specifies the maximum number of blobs to return,
+ including all BlobPrefix elements. If the request does not specify
+ maxresults or specifies a value greater than 5,000, the server will
+ return up to 5,000 items. Setting maxresults to a value less than
+ or equal to zero results in error response code 400 (Bad Request).
+ include:
+ Optional. Specifies one or more datasets to include in the
+ response. To specify more than one of these options on the URI,
+ you must separate each option with a comma. Valid values are:
+ snapshots:
+ Specifies that snapshots should be included in the
+ enumeration. Snapshots are listed from oldest to newest in
+ the response.
+ metadata:
+ Specifies that blob metadata be returned in the response.
+ uncommittedblobs:
+ Specifies that blobs for which blocks have been uploaded,
+ but which have not been committed using Put Block List
+ (REST API), be included in the response.
+ copy:
+ Version 2012-02-12 and newer. Specifies that metadata
+ related to any current or previous Copy Blob operation
+ should be included in the response.
+ delimiter:
+ Optional. When the request includes this parameter, the operation
+ returns a BlobPrefix element in the response body that acts as a
+ placeholder for all blobs whose names begin with the same
+ substring up to the appearance of the delimiter character. The
+ delimiter may be a single character or a string.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
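As a sketch of the new delimiter support, a hierarchical listing can be walked by combining the blobs and prefixes collections on the result (assuming both Blob and BlobPrefix expose a name attribute):

    result = blob_service.list_blobs('mycontainer', delimiter='/')
    for blob in result.blobs:
        print(blob.name)        # blobs at the current "level"
    for prefix in result.prefixes:
        print(prefix.name)      # stands in for all blobs sharing this prefix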
@@ -217,6 +307,7 @@ def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None,
request.path = '/' + _str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
+ ('delimiter', _str_or_none(delimiter)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
@@ -225,17 +316,17 @@ def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None,
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return _parse_enum_results_list(response, BlobEnumResults, "Blobs", Blob)
+ return _parse_blob_enum_results_list(response)
def set_blob_service_properties(self, storage_service_properties, timeout=None):
'''
- Sets the properties of a storage account's Blob service, including Windows Azure
- Storage Analytics. You can also use this operation to set the default request
- version for all incoming requests that do not have a version specified.
+ Sets the properties of a storage account's Blob service, including
+ Windows Azure Storage Analytics. You can also use this operation to
+ set the default request version for all incoming requests that do not
+ have a version specified.
storage_service_properties: a StorageServiceProperties object.
- timeout: Optional. The timeout parameter is expressed in seconds. For example, the
- following value sets a timeout of 30 seconds for the request: timeout=30.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
@@ -250,11 +341,10 @@ def set_blob_service_properties(self, storage_service_properties, timeout=None):
def get_blob_service_properties(self, timeout=None):
'''
- Gets the properties of a storage account's Blob service, including Windows Azure
- Storage Analytics.
+ Gets the properties of a storage account's Blob service, including
+ Windows Azure Storage Analytics.
- timeout: Optional. The timeout parameter is expressed in seconds. For example, the
- following value sets a timeout of 30 seconds for the request: timeout=30.
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
@@ -269,8 +359,11 @@ def get_blob_service_properties(self, timeout=None):
def get_blob_properties(self, container_name, blob_name, x_ms_lease_id=None):
'''
- Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
+ Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the blob.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
@@ -290,7 +383,10 @@ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control
'''
Sets system properties on the blob.
- x_ms_blob_cache_control: Optional. Modifies the cache control string for the blob.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ x_ms_blob_cache_control:
+ Optional. Modifies the cache control string for the blob.
x_ms_blob_content_type: Optional. Sets the blob's content type.
x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
@@ -317,14 +413,45 @@ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control
def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_encoding=None, content_language=None, content_md5=None, cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_blob_cache_control=None, x_ms_meta_name_values=None, x_ms_lease_id=None, x_ms_blob_content_length=None, x_ms_blob_sequence_number=None):
'''
- Creates a new block blob or page blob, or updates the content of an existing block blob.
+ Creates a new block blob or page blob, or updates the content of an
+ existing block blob.
- container_name: the name of container to put the blob
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of blob to create or update.
+ blob: the content of blob.
x_ms_blob_type: Required. Could be BlockBlob or PageBlob
+ content_encoding:
+ Optional. Specifies which content encodings have been applied to
+ the blob. This value is returned to the client when the Get Blob
+ (REST API) operation is performed on the blob resource. The client
+ can use this value when returned to decode the blob content.
+ content_language:
+ Optional. Specifies the natural languages used by this resource.
+ content_md5:
+ Optional. An MD5 hash of the blob content. This hash is used to
+ verify the integrity of the blob during transport. When this header
+ is specified, the storage service checks the hash that has arrived
+ with the one that was sent. If the two hashes do not match, the
+ operation will fail with error code 400 (Bad Request).
+ cache_control:
+ Optional. The Blob service stores this value but does not use or
+ modify it.
+ x_ms_blob_content_type: Optional. Set the blob's content type.
+ x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
+ x_ms_blob_content_language: Optional. Set the blob's content language.
+ x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
+ x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
- blob: the content of blob.
+ x_ms_blob_content_length:
+ Required for page blobs. This header specifies the maximum size
+ for the page blob, up to 1 TB. The page blob size must be aligned
+ to a 512-byte boundary.
+ x_ms_blob_sequence_number:
+ Optional. Set for page blobs only. The sequence number is a
+ user-controlled value that you can use to track requests. The
+ value of the sequence number must be between 0 and 2^63 - 1. The
+ default value is 0.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -357,11 +484,21 @@ def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_enco
def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_ms_lease_id=None, x_ms_range_get_content_md5=None):
'''
- Reads or downloads a blob from the system, including its metadata and properties.
+ Reads or downloads a blob from the system, including its metadata and
+ properties.
- container_name: the name of container to get the blob
- blob_name: the name of blob
- x_ms_range: Optional. Return only the bytes of the blob in the specified range.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ snapshot:
+ Optional. The snapshot parameter is an opaque DateTime value that,
+ when present, specifies the blob snapshot to retrieve.
+ x_ms_range:
+ Optional. Return only the bytes of the blob in the specified range.
+ x_ms_lease_id: Required if the blob has an active lease.
+ x_ms_range_get_content_md5:
+ Optional. When this header is set to true and specified together
+ with the Range header, the service returns the MD5 hash for the
+ range, as long as the range is less than or equal to 4 MB in size.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
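For example, a ranged read of the first 4 KB with transport-level MD5 verification might look like this; passing the string 'true' mirrors the REST header value and is an assumption here:

    data = blob_service.get_blob('mycontainer', 'myblob.txt',
                                 x_ms_range='bytes=0-4095',
                                 x_ms_range_get_content_md5='true')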
@@ -385,8 +522,12 @@ def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease
'''
Returns all user-defined metadata for the specified blob or snapshot.
- container_name: the name of container containing the blob.
- blob_name: the name of blob to get metadata.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ snapshot:
+ Optional. The snapshot parameter is an opaque DateTime value that,
+ when present, specifies the blob snapshot to retrieve.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -400,15 +541,17 @@ def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return _parse_response_for_dict_prefix(response, prefix='x-ms-meta')
+ return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
def set_blob_metadata(self, container_name, blob_name, x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
- Sets user-defined metadata for the specified blob as one or more name-value pairs.
+ Sets user-defined metadata for the specified blob as one or more
+ name-value pairs.
- container_name: the name of container containing the blob
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
x_ms_meta_name_values: Dict containing name and value pairs.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -428,10 +571,10 @@ def lease_blob(self, container_name, blob_name, x_ms_lease_action, x_ms_lease_id
'''
Establishes and manages a one-minute lock on a blob for write operations.
- container_name: the name of container.
- blob_name: the name of blob
- x_ms_lease_id: Any GUID format string
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
x_ms_lease_action: Required. Possible values: acquire|renew|release|break
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
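A lease acquire/release sketch, assuming the returned dictionary exposes the lease under the 'x-ms-lease-id' key:

    lease = blob_service.lease_blob('mycontainer', 'myblob.txt', 'acquire')
    lease_id = lease['x-ms-lease-id']
    # ... perform write operations passing x_ms_lease_id=lease_id ...
    blob_service.lease_blob('mycontainer', 'myblob.txt', 'release', lease_id)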
@@ -454,17 +597,16 @@ def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, i
'''
Creates a read-only snapshot of a blob.
- container_name: the name of container.
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
if_modified_since: Optional. Datetime string.
if_unmodified_since: DateTime string.
- if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
+ if_match:
+ Optional. Snapshot the blob only if its ETag value matches the
+ value specified.
if_none_match: Optional. An ETag value
- x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -490,27 +632,33 @@ def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_
'''
Copies a blob to a destination within the storage account.
- container_name: the name of container.
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
x_ms_copy_source: the blob to be copied. Should be absolute path format.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
- x_ms_source_if_modified_since: Optional. An ETag value. Specify this conditional
- header to copy the source blob only if its ETag matches the value specified.
- x_ms_source_if_unmodified_since: Optional. An ETag value. Specify this conditional
- header to copy the blob only if its ETag does not match the value specified.
- x_ms_source_if_match: Optional. A DateTime value. Specify this conditional header to copy
- the blob only if the source blob has been modified since the specified date/time.
- x_ms_source_if_none_match: Optional. An ETag value. Specify this conditional header to
- copy the source blob only if its ETag matches the value specified.
+ x_ms_source_if_modified_since:
+ Optional. A DateTime value. Specify this conditional header to
+ copy the blob only if the source blob has been modified since the
+ specified date/time.
+ x_ms_source_if_unmodified_since:
+ Optional. A DateTime value. Specify this conditional header to
+ copy the blob only if the source blob has not been modified since
+ the specified date/time.
+ x_ms_source_if_match:
+ Optional. An ETag value. Specify this conditional header to copy
+ the source blob only if its ETag matches the value specified.
+ x_ms_source_if_none_match:
+ Optional. An ETag value. Specify this conditional header to copy
+ the source blob only if its ETag does not match the value specified.
if_modified_since: Optional. Datetime string.
if_unmodified_since: DateTime string.
- if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
+ if_match:
+ Optional. Copy the blob only if its ETag value matches the
+ value specified.
if_none_match: Optional. An ETag value
- x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
- x-ms-meta-name-values: a dict containing name, value for metadata.
+ x_ms_lease_id: Required if the blob has an active lease.
+ x_ms_source_lease_id:
+ Optional. Specify this to perform the Copy Blob operation only if
+ the lease ID given matches the active lease ID of the source blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -539,18 +687,18 @@ def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_
def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_lease_id=None):
'''
- Marks the specified blob or snapshot for deletion. The blob is later deleted
- during garbage collection.
+ Marks the specified blob or snapshot for deletion. The blob is later
+ deleted during garbage collection.
- To mark a specific snapshot for deletion provide the date/time of the snapshot via
- the snapshot parameter.
+ To mark a specific snapshot for deletion provide the date/time of the
+ snapshot via the snapshot parameter.
- container_name: the name of container.
- blob_name: the name of blob
- x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ snapshot:
+ Optional. The snapshot parameter is an opaque DateTime value that,
+ when present, specifies the blob snapshot to delete.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -568,13 +716,18 @@ def put_block(self, container_name, blob_name, block, blockid, content_md5=None,
'''
Creates a new block to be committed as part of a blob.
- container_name: the name of the container.
- blob_name: the name of the blob
- content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
- the integrity of the blob during transport. When this header is specified,
- the storage service checks the hash that has arrived with the one that was sent.
- x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
- a blob with an active lease, specify the valid lease ID for this header.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ block: Content of the block.
+ blockid:
+ Required. A value that identifies the block. The string must be
+ less than or equal to 64 bytes in size.
+ content_md5:
+ Optional. An MD5 hash of the block content. This hash is used to
+ verify the integrity of the blob during transport. When this
+ header is specified, the storage service checks the hash that has
+ arrived with the one that was sent.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -596,30 +749,37 @@ def put_block(self, container_name, blob_name, block, blockid, content_md5=None,
def put_block_list(self, container_name, blob_name, block_list, content_md5=None, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
- Writes a blob by specifying the list of block IDs that make up the blob. In order to
- be written as part of a blob, a block must have been successfully written to the server
- in a prior Put Block (REST API) operation.
+ Writes a blob by specifying the list of block IDs that make up the
+ blob. In order to be written as part of a blob, a block must have been
+ successfully written to the server in a prior Put Block (REST API)
+ operation.
- container_name: the name of container.
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ block_list: A str list containing the block ids.
+ content_md5:
+ Optional. An MD5 hash of the block content. This hash is used to
+ verify the integrity of the blob during transport. When this header
+ is specified, the storage service checks the hash that has arrived
+ with the one that was sent.
+ x_ms_blob_cache_control:
+ Optional. Sets the blob's cache control. If specified, this
+ property is stored with the blob and returned with a read request.
+ x_ms_blob_content_type:
+ Optional. Sets the blob's content type. If specified, this property
+ is stored with the blob and returned with a read request.
+ x_ms_blob_content_encoding:
+ Optional. Sets the blob's content encoding. If specified, this
+ property is stored with the blob and returned with a read request.
+ x_ms_blob_content_language:
+ Optional. Set the blob's content language. If specified, this
+ property is stored with the blob and returned with a read request.
+ x_ms_blob_content_md5:
+ Optional. An MD5 hash of the blob content. Note that this hash is
+ not validated, as the hashes for the individual blocks were
+ validated when each was uploaded.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
- x_ms_blob_cache_control: Optional. Sets the blob's cache control. If specified, this
- property is stored with the blob and returned with a read request.
- x_ms_blob_content_type: Optional. Sets the blob's content type. If specified, this
- property is stored with the blob and returned with a read request.
- x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. If specified,
- this property is stored with the blob and returned with a read request.
- x_ms_blob_content_language: Optional. Set the blob's content language. If specified,
- this property is stored with the blob and returned with a read request.
- x_ms_blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash
- is not validated, as the hashes for the individual blocks were validated when
- each was uploaded.
- content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
- the integrity of the blob during transport. When this header is specified,
- the storage service checks the hash that has arrived with the one that was sent.
- x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
- a blob with an active lease, specify the valid lease ID for this header.
- x-ms-meta-name-values: a dict containing name, value for metadata.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
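Putting put_block and put_block_list together, a chunked block blob upload might look like the sketch below; the block ids are illustrative and simply need to be unique, equally sized strings:

    import base64

    block_ids = []
    for index, chunk in enumerate([b'hello ', b'world']):
        block_id = base64.b64encode('block-%06d' % index)
        blob_service.put_block('mycontainer', 'myblockblob', chunk, block_id)
        block_ids.append(block_id)

    # commits the uploaded blocks as the blob content
    blob_service.put_block_list('mycontainer', 'myblockblob', block_ids)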
@@ -638,21 +798,25 @@ def put_block_list(self, container_name, blob_name, block_list, content_md5=None
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
- request.body = _get_request_body(convert_block_list_to_xml(block_list))
+ request.body = _get_request_body(_convert_block_list_to_xml(block_list))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype=None, x_ms_lease_id=None):
'''
- Retrieves the list of blocks that have been uploaded as part of a block blob.
+ Retrieves the list of blocks that have been uploaded as part of a
+ block blob.
- container_name: the name of container.
- blob_name: the name of blob
- snapshot: Optional. Datetime to determine the time to retrieve the blocks.
- blocklisttype: Specifies whether to return the list of committed blocks, the
- list of uncommitted blocks, or both lists together. Valid values are
- committed, uncommitted, or all.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ snapshot:
+ Optional. Datetime to determine the time to retrieve the blocks.
+ blocklisttype:
+ Specifies whether to return the list of committed blocks, the list
+ of uncommitted blocks, or both lists together. Valid values are:
+ committed, uncommitted, or all.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -669,28 +833,71 @@ def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return convert_response_to_block_list(response)
+ return _convert_response_to_block_list(response)
def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write, timeout=None, content_md5=None, x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, x_ms_if_sequence_number_lt=None, x_ms_if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None):
'''
Writes a range of pages to a page blob.
- container_name: the name of container.
- blob_name: the name of blob
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ page: Content of the page.
+ x_ms_range:
+ Required. Specifies the range of bytes to be written as a page.
+ Both the start and end of the range must be specified. Must be in
+ format: bytes=startByte-endByte. Given that pages must be aligned
+ with 512-byte boundaries, the start offset must be a modulus of
+ 512 and the end offset must be a modulus of 512-1. Examples of
+ valid byte ranges are 0-511, 512-1023, etc.
+ x_ms_page_write:
+ Required. You may specify one of the following options:
+ update (lower case):
+ Writes the bytes specified by the request body into the
+ specified range. The Range and Content-Length headers must
+ match to perform the update.
+ clear (lower case):
+ Clears the specified range and releases the space used in
+ storage for that range. To clear a range, set the
+ Content-Length header to zero, and the Range header to a
+ value that indicates the range to clear, up to maximum
+ blob size.
timeout: the timeout parameter is expressed in seconds.
- x_ms_range: Required. Specifies the range of bytes to be written as a page. Both the start
- and end of the range must be specified. Must be in format: bytes=startByte-endByte.
- Given that pages must be aligned with 512-byte boundaries, the start offset must be
- a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
- byte ranges are 0-511, 512-1023, etc.
- x_ms_page_write: Required. You may specify one of the following options :
- 1. update(lower case): Writes the bytes specified by the request body into the specified
- range. The Range and Content-Length headers must match to perform the update.
- 2. clear(lower case): Clears the specified range and releases the space used in storage
- for that range. To clear a range, set the Content-Length header to zero, and the Range
- header to a value that indicates the range to clear, up to maximum blob size.
- x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
- with an active lease, specify the valid lease ID for this header.
+ content_md5:
+ Optional. An MD5 hash of the page content. This hash is used to
+ verify the integrity of the page during transport. When this header
+ is specified, the storage service compares the hash of the content
+ that has arrived with the header value that was sent. If the two
+ hashes do not match, the operation will fail with error code 400
+ (Bad Request).
+ x_ms_lease_id: Required if the blob has an active lease.
+ x_ms_if_sequence_number_lte:
+ Optional. If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ x_ms_if_sequence_number_lt:
+ Optional. If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ x_ms_if_sequence_number_eq:
+ Optional. If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ if_modified_since:
+ Optional. A DateTime value. Specify this conditional header to
+ write the page only if the blob has been modified since the
+ specified date/time. If the blob has not been modified, the Blob
+ service fails.
+ if_unmodified_since:
+ Optional. A DateTime value. Specify this conditional header to
+ write the page only if the blob has not been modified since the
+ specified date/time. If the blob has been modified, the Blob
+ service fails.
+ if_match:
+ Optional. An ETag value. Specify an ETag value for this conditional
+ header to write the page only if the blob's ETag value matches the
+ value specified. If the values do not match, the Blob service fails.
+ if_none_match:
+ Optional. An ETag value. Specify an ETag value for this conditional
+ header to write the page only if the blob's ETag value does not
+ match the value specified. If the values are identical, the Blob
+ service fails.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
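For example, writing the first 512-byte page of a page blob, following the range format described above (this assumes the blob was created beforehand with put_blob, x_ms_blob_type='PageBlob' and an appropriate x_ms_blob_content_length):

    page = b'a' * 512
    blob_service.put_page('mycontainer', 'mypageblob', page,
                          x_ms_range='bytes=0-511',
                          x_ms_page_write='update')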
@@ -724,15 +931,23 @@ def get_page_ranges(self, container_name, blob_name, snapshot=None, range=None,
'''
Retrieves the page ranges for a blob.
- container_name: the name of container.
- blob_name: the name of blob
- _ms_range: Optional. Specifies the range of bytes to be written as a page. Both the start
- and end of the range must be specified. Must be in format: bytes=startByte-endByte.
- Given that pages must be aligned with 512-byte boundaries, the start offset must be
- a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
- byte ranges are 0-511, 512-1023, etc.
- x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
- with an active lease, specify the valid lease ID for this header.
+ container_name: Name of existing container.
+ blob_name: Name of existing blob.
+ snapshot:
+ Optional. The snapshot parameter is an opaque DateTime value that,
+ when present, specifies the blob snapshot to retrieve information
+ from.
+ range:
+ Optional. Specifies the range of bytes over which to list ranges,
+ inclusively. If omitted, then all ranges for the blob are returned.
+ x_ms_range:
+ Optional. Specifies the range of bytes to be written as a page.
+ Both the start and end of the range must be specified. Must be in
+ format: bytes=startByte-endByte. Given that pages must be aligned
+ with 512-byte boundaries, the start offset must be a modulus of
+ 512 and the end offset must be a modulus of 512-1. Examples of
+ valid byte ranges are 0-511, 512-1023, etc.
+ x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
diff --git a/src/azure/storage/cloudstorageaccount.py b/src/azure/storage/cloudstorageaccount.py
index ead0928ac99c..05a5cc18c190 100644
--- a/src/azure/storage/cloudstorageaccount.py
+++ b/src/azure/storage/cloudstorageaccount.py
@@ -17,9 +17,11 @@
from azure.storage.queueservice import QueueService
class CloudStorageAccount:
- """Provides a factory for creating the blob, queue, and table services
+ """
+ Provides a factory for creating the blob, queue, and table services
with a common account name and account key. Users can either use the
- factory or can construct the appropriate service directly."""
+ factory or can construct the appropriate service directly.
+ """
def __init__(self, account_name=None, account_key=None):
self.account_name = account_name
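A small sketch of the factory in use; the create_*_service method names are assumed here rather than shown in this diff:

    from azure.storage.cloudstorageaccount import CloudStorageAccount

    account = CloudStorageAccount(account_name='myaccount', account_key='mykey')
    blob_service = account.create_blob_service()
    queue_service = account.create_queue_service()
    table_service = account.create_table_service()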
diff --git a/src/azure/storage/queueservice.py b/src/azure/storage/queueservice.py
index f665fca37132..d4ecf13c0ea7 100644
--- a/src/azure/storage/queueservice.py
+++ b/src/azure/storage/queueservice.py
@@ -12,41 +12,58 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import base64
-import os
-import urllib2
-
-from azure.storage import *
-from azure.storage.storageclient import _StorageClient
-from azure.storage import (_update_storage_queue_header)
+from azure import (WindowsAzureConflictError,
+ WindowsAzureError,
+ DEV_QUEUE_HOST,
+ QUEUE_SERVICE_HOST_BASE,
+ xml_escape,
+ _convert_class_to_xml,
+ _dont_fail_not_exist,
+ _dont_fail_on_exist,
+ _get_request_body,
+ _int_or_none,
+ _parse_enum_results_list,
+ _parse_response,
+ _parse_response_for_dict_filter,
+ _parse_response_for_dict_prefix,
+ _str,
+ _str_or_none,
+ _update_request_uri_query_local_storage,
+ _validate_not_none,
+ _ERROR_CONFLICT,
+ )
from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
-from azure import (_validate_not_none, Feed,
- _convert_response_to_feeds, _str, _str_or_none, _int_or_none,
- _get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
- WindowsAzureError, _parse_response, _convert_class_to_xml,
- _parse_response_for_dict, _parse_response_for_dict_prefix,
- _parse_response_for_dict_filter,
- _parse_enum_results_list, _update_request_uri_query_local_storage,
- _parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
+from azure.storage import (Queue,
+ QueueEnumResults,
+ QueueMessagesList,
+ StorageServiceProperties,
+ _update_storage_queue_header,
+ )
+from azure.storage.storageclient import _StorageClient
class QueueService(_StorageClient):
'''
This is the main class managing queue resources.
- account_name: your storage account name, required for all operations.
- account_key: your storage account key, required for all operations.
'''
def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = QUEUE_SERVICE_HOST_BASE, dev_host = DEV_QUEUE_HOST):
+ '''
+ account_name: your storage account name, required for all operations.
+ account_key: your storage account key, required for all operations.
+ protocol: Optional. Protocol. Defaults to http.
+ host_base:
+ Optional. Live host base url. Defaults to Azure url. Override this
+ for on-premise.
+ dev_host: Optional. Dev host url. Defaults to localhost.
+ '''
return super(QueueService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
def get_queue_service_properties(self, timeout=None):
'''
- Gets the properties of a storage account's Queue Service, including Windows Azure
- Storage Analytics.
+ Gets the properties of a storage account's Queue Service, including
+ Windows Azure Storage Analytics.
- timeout: Optional. The timeout parameter is expressed in seconds. For example, the
- following value sets a timeout of 30 seconds for the request: timeout=30
+ timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
@@ -62,6 +79,23 @@ def get_queue_service_properties(self, timeout=None):
def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
'''
Lists all of the queues in a given storage account.
+
+ prefix:
+ Filters the results to return only queues with names that begin
+ with the specified prefix.
+ marker:
+ A string value that identifies the portion of the list to be
+ returned with the next list operation. The operation returns a
+ NextMarker element within the response body if the list returned
+ was not complete. This value may then be used as a query parameter
+ in a subsequent call to request the next portion of the list of
+ queues. The marker value is opaque to the client.
+ maxresults:
+ Specifies the maximum number of queues to return. If maxresults is
+ not specified, the server will return up to 5,000 items.
+ include:
+ Optional. Include this parameter to specify that the queue's
+ metadata be returned as part of the response body.
'''
request = HTTPRequest()
request.method = 'GET'
@@ -84,9 +118,10 @@ def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=Fal
Creates a queue under the given account.
queue_name: name of the queue.
- x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
- with the queue as metadata.
- fail_on_exist: specify whether throw exception when queue exists.
+ x_ms_meta_name_values:
+ Optional. A dict containing name-value pairs to associate with the
+ queue as metadata.
+ fail_on_exist: Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -108,15 +143,16 @@ def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=Fal
else:
response = self._perform_request(request)
if response.status == HTTP_RESPONSE_NO_CONTENT:
- raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
+ raise WindowsAzureConflictError(_ERROR_CONFLICT)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Permanently deletes the specified queue.
- queue_name: name of the queue.
- fail_not_exist: specify whether throw exception when queue doesn't exist.
+ queue_name: Name of the queue.
+ fail_not_exist:
+ Specify whether to throw an exception when the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -138,10 +174,10 @@ def delete_queue(self, queue_name, fail_not_exist=False):
def get_queue_metadata(self, queue_name):
'''
- Retrieves user-defined metadata and queue properties on the specified queue.
- Metadata is associated with the queue as name-values pairs.
+ Retrieves user-defined metadata and queue properties on the specified
+ queue. Metadata is associated with the queue as name-value pairs.
- queue_name: name of the queue.
+ queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -152,16 +188,17 @@ def get_queue_metadata(self, queue_name):
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return _parse_response_for_dict_prefix(response, prefix='x-ms-meta')
+ return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])
def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
'''
- Sets user-defined metadata on the specified queue. Metadata is associated
- with the queue as name-value pairs.
+ Sets user-defined metadata on the specified queue. Metadata is
+ associated with the queue as name-value pairs.
- queue_name: name of the queue.
- x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
- with the queue as metadata.
+ queue_name: Name of the queue.
+ x_ms_meta_name_values:
+ Optional. A dict containing name-value pairs to associate with the
+ queue as metadata.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -175,18 +212,26 @@ def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
def put_message(self, queue_name, message_text, visibilitytimeout=None, messagettl=None):
'''
- Adds a new message to the back of the message queue. A visibility timeout can
- also be specified to make the message invisible until the visibility timeout
- expires. A message must be in a format that can be included in an XML request
- with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
- 2011-08-18 and newer, or 8KB in size for previous versions.
+ Adds a new message to the back of the message queue. A visibility
+ timeout can also be specified to make the message invisible until the
+ visibility timeout expires. A message must be in a format that can be
+ included in an XML request with UTF-8 encoding. The encoded message can
+ be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size
+ for previous versions.
- queue_name: name of the queue.
- visibilitytimeout: Optional. If specified, the request must be made using an
- x-ms-version of 2011-08-18 or newer.
- messagettl: Optional. Specifies the time-to-live interval for the message,
- in seconds. The maximum time-to-live allowed is 7 days. If this parameter
- is omitted, the default time-to-live is 7 days.
+ queue_name: Name of the queue.
+ message_text: Message content.
+ visibilitytimeout:
+ Optional. If not specified, the default value is 0. Specifies the
+ new visibility timeout value, in seconds, relative to server time.
+ The new value must be larger than or equal to 0, and cannot be
+ larger than 7 days. The visibility timeout of a message cannot be
+ set to a value later than the expiry time. visibilitytimeout
+ should be set to a value smaller than the time-to-live value.
+ messagettl:
+ Optional. Specifies the time-to-live interval for the message, in
+ seconds. The maximum time-to-live allowed is 7 days. If this
+ parameter is omitted, the default time-to-live is 7 days.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_text', message_text)
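
A minimal sketch of put_message as documented above (the QueueService import path, credentials and queue name are placeholders; create_queue is assumed to behave as in earlier versions of this module):

    from azure.storage.queueservice import QueueService

    queue_service = QueueService(account_name='myaccount', account_key='mykey')
    queue_service.create_queue('taskqueue')

    # Enqueue a message that stays invisible for 30 seconds and expires
    # after one hour; both values are expressed in seconds.
    queue_service.put_message('taskqueue', 'process item 42',
                              visibilitytimeout=30, messagettl=3600)
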
@@ -210,16 +255,19 @@ def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
'''
Retrieves one or more messages from the front of the queue.
- queue_name: name of the queue.
- numofmessages: Optional. A nonzero integer value that specifies the number of
- messages to retrieve from the queue, up to a maximum of 32. If fewer are
- visible, the visible messages are returned. By default, a single message
- is retrieved from the queue with this operation.
- visibilitytimeout: Required. Specifies the new visibility timeout value, in
- seconds, relative to server time. The new value must be larger than or
- equal to 1 second, and cannot be larger than 7 days, or larger than 2
- hours on REST protocol versions prior to version 2011-08-18. The visibility
- timeout of a message can be set to a value later than the expiry time.
+ queue_name: Name of the queue.
+ numofmessages:
+ Optional. A nonzero integer value that specifies the number of
+ messages to retrieve from the queue, up to a maximum of 32. If
+ fewer are visible, the visible messages are returned. By default,
+ a single message is retrieved from the queue with this operation.
+ visibilitytimeout:
+ Specifies the new visibility timeout value, in seconds, relative
+ to server time. The new value must be larger than or equal to 1
+ second, and cannot be larger than 7 days, or larger than 2 hours
+ on REST protocol versions prior to version 2011-08-18. The
+ visibility timeout of a message can be set to a value later than
+ the expiry time.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
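
A sketch of a typical receive-and-delete loop built on get_messages and delete_message as documented above; the attribute names on the returned messages (message_id, pop_receipt, message_text) follow the SDK's element-to-attribute naming and are assumptions here:

    from azure.storage.queueservice import QueueService

    queue_service = QueueService(account_name='myaccount', account_key='mykey')

    # Fetch up to 16 messages and hide them from other consumers for 5 minutes.
    messages = queue_service.get_messages('taskqueue', numofmessages=16,
                                          visibilitytimeout=5 * 60)
    for message in messages:
        # ... process message.message_text ...
        # Deleting requires the pop receipt returned by this Get Messages call.
        queue_service.delete_message('taskqueue', message.message_id,
                                     message.pop_receipt)
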
@@ -241,10 +289,11 @@ def peek_messages(self, queue_name, numofmessages=None):
Retrieves one or more messages from the front of the queue, but does not alter
the visibility of the message.
- queue_name: name of the queue.
- numofmessages: Optional. A nonzero integer value that specifies the number of
- messages to peek from the queue, up to a maximum of 32. By default,
- a single message is peeked from the queue with this operation.
+ queue_name: Name of the queue.
+ numofmessages:
+ Optional. A nonzero integer value that specifies the number of
+ messages to peek from the queue, up to a maximum of 32. By default,
+ a single message is peeked from the queue with this operation.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -262,9 +311,11 @@ def delete_message(self, queue_name, message_id, popreceipt):
'''
Deletes the specified message.
- queue_name: name of the queue.
- popreceipt: Required. A valid pop receipt value returned from an earlier call
- to the Get Messages or Update Message operation.
+ queue_name: Name of the queue.
+ message_id: Message to delete.
+ popreceipt:
+ Required. A valid pop receipt value returned from an earlier call
+ to the Get Messages or Update Message operation.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
@@ -282,7 +333,7 @@ def clear_messages(self, queue_name):
'''
Deletes all messages from the specified queue.
- queue_name: name of the queue.
+ queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
@@ -298,14 +349,18 @@ def update_message(self, queue_name, message_id, message_text, popreceipt, visib
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
- queue_name: name of the queue.
- popreceipt: Required. A valid pop receipt value returned from an earlier call
- to the Get Messages or Update Message operation.
- visibilitytimeout: Required. Specifies the new visibility timeout value, in
- seconds, relative to server time. The new value must be larger than or
- equal to 0, and cannot be larger than 7 days. The visibility timeout
- of a message cannot be set to a value later than the expiry time. A
- message can be updated until it has been deleted or has expired.
+ queue_name: Name of the queue.
+ message_id: Message to update.
+ message_text: Content of message.
+ popreceipt:
+ Required. A valid pop receipt value returned from an earlier call
+ to the Get Messages or Update Message operation.
+ visibilitytimeout:
+ Required. Specifies the new visibility timeout value, in seconds,
+ relative to server time. The new value must be larger than or equal
+ to 0, and cannot be larger than 7 days. The visibility timeout of a
+ message cannot be set to a value later than the expiry time. A
+ message can be updated until it has been deleted or has expired.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
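
A sketch of update_message as documented above, extending the visibility timeout of a message obtained from get_messages (the attribute names on the returned message are assumptions):

    from azure.storage.queueservice import QueueService

    queue_service = QueueService(account_name='myaccount', account_key='mykey')

    messages = queue_service.get_messages('taskqueue')
    if messages:
        message = messages[0]
        # Rewrite the content and keep the message hidden for another 60
        # seconds; the pop receipt must come from the earlier Get Messages.
        queue_service.update_message('taskqueue', message.message_id,
                                     'retrying item 42', message.pop_receipt,
                                     visibilitytimeout=60)
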
@@ -335,7 +390,7 @@ def set_queue_service_properties(self, storage_service_properties, timeout=None)
Sets the properties of a storage account's Queue service, including Windows Azure
Storage Analytics.
- storage_service_properties: a StorageServiceProperties object.
+ storage_service_properties: StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
diff --git a/src/azure/storage/storageclient.py b/src/azure/storage/storageclient.py
index 1bb259c391e8..5edc068586d3 100644
--- a/src/azure/storage/storageclient.py
+++ b/src/azure/storage/storageclient.py
@@ -12,19 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import base64
-import urllib2
-import hmac
-import hashlib
import os
-
-from azure.storage import _storage_error_handler
-from azure.http.httpclient import _HTTPClient
+from azure import (WindowsAzureError,
+ DEV_ACCOUNT_NAME,
+ DEV_ACCOUNT_KEY,
+ _ERROR_STORAGE_MISSING_INFO,
+ )
from azure.http import HTTPError
-from azure import (_parse_response, WindowsAzureError,
- DEV_ACCOUNT_NAME, DEV_ACCOUNT_KEY)
-import azure
+from azure.http.httpclient import _HTTPClient
+from azure.storage import _storage_error_handler
#--------------------------------------------------------------------------
# constants for azure app setting environment variables
@@ -39,6 +36,15 @@ class _StorageClient(object):
'''
def __init__(self, account_name=None, account_key=None, protocol='http', host_base='', dev_host=''):
+ '''
+ account_name: Your storage account name, required for all operations.
+ account_key: Your storage account key, required for all operations.
+ protocol: Optional. Protocol. Defaults to http.
+ host_base:
+ Optional. Live host base URL. Defaults to the Azure URL. Override
+ this for on-premises deployments.
+ dev_host: Optional. Dev host URL. Defaults to localhost.
+ '''
if account_name is not None:
self.account_name = account_name.encode('ascii', 'ignore')
else:
@@ -82,19 +88,21 @@ def __init__(self, account_name=None, account_key=None, protocol='http', host_ba
self.account_key = os.environ[AZURE_STORAGE_ACCESS_KEY]
if not self.account_name or not self.account_key:
- raise WindowsAzureError(azure._ERROR_STORAGE_MISSING_INFO)
+ raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)
self._httpclient = _HTTPClient(service_instance=self, account_key=self.account_key, account_name=self.account_name, protocol=protocol)
self._batchclient = None
self._filter = self._perform_request_worker
def with_filter(self, filter):
- '''Returns a new service which will process requests with the
- specified filter. Filtering operations can include logging, automatic
- retrying, etc... The filter is a lambda which receives the HTTPRequest
- and another lambda. The filter can perform any pre-processing on the
- request, pass it off to the next lambda, and then perform any post-processing
- on the response.'''
+ '''
+ Returns a new service which will process requests with the specified
+ filter. Filtering operations can include logging, automatic retrying,
+ etc... The filter is a lambda which receives the HTTPRequest and
+ another lambda. The filter can perform any pre-processing on the
+ request, pass it off to the next lambda, and then perform any
+ post-processing on the response.
+ '''
res = type(self)(self.account_name, self.account_key, self.protocol)
old_filter = self._filter
def new_filter(request):
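
A sketch of the filter contract described in the docstring above: the filter receives the HTTPRequest and the next callable in the chain, and may pre- and post-process around it. The logging filter, credentials and queue name below are illustrative only:

    from azure.storage.queueservice import QueueService

    def logging_filter(request, next_filter):
        # Pre-processing: inspect the outgoing request.
        print('>> ' + request.method + ' ' + request.path)
        response = next_filter(request)
        # Post-processing: inspect the response before handing it back.
        print('<< ' + str(response.status))
        return response

    queue_service = QueueService(account_name='myaccount', account_key='mykey')
    logged_service = queue_service.with_filter(logging_filter)
    logged_service.clear_messages('taskqueue')
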
@@ -104,7 +112,9 @@ def new_filter(request):
return res
def set_proxy(self, host, port):
- '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ '''
+ Sets the proxy server host and port for HTTP CONNECT tunneling.
+ '''
self._httpclient.set_proxy(host, port)
def _get_host(self):
@@ -117,8 +127,10 @@ def _perform_request_worker(self, request):
return self._httpclient.perform_request(request)
def _perform_request(self, request):
- ''' Sends the request and return response. Catches HTTPError and hand it to error handler'''
-
+ '''
+ Sends the request and returns the response. Catches HTTPError and
+ hands it to the error handler.
+ '''
try:
if self._batchclient is not None:
return self._batchclient.insert_request_to_batch(request)
diff --git a/src/azure/storage/tableservice.py b/src/azure/storage/tableservice.py
index adc62d93ad37..c795db3050cf 100644
--- a/src/azure/storage/tableservice.py
+++ b/src/azure/storage/tableservice.py
@@ -12,36 +12,52 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-import base64
-import os
-import urllib2
-
-from azure.storage import *
-from azure.storage.storageclient import _StorageClient
-from azure.storage import (_update_storage_table_header,
- convert_table_to_xml, _convert_xml_to_table,
- convert_entity_to_xml, _convert_response_to_entity,
- _convert_xml_to_entity, _sign_storage_table_request)
+from azure import (WindowsAzureError,
+ TABLE_SERVICE_HOST_BASE,
+ DEV_TABLE_HOST,
+ xml_escape,
+ _convert_class_to_xml,
+ _convert_response_to_feeds,
+ _dont_fail_not_exist,
+ _dont_fail_on_exist,
+ _get_request_body,
+ _int_or_none,
+ _parse_response,
+ _parse_response_for_dict,
+ _parse_response_for_dict_filter,
+ _str,
+ _str_or_none,
+ _update_request_uri_query_local_storage,
+ _validate_not_none,
+ )
+from azure.http import HTTPRequest
from azure.http.batchclient import _BatchClient
-from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
-from azure import (_validate_not_none, Feed,
- _convert_response_to_feeds, _str, _str_or_none, _int_or_none,
- _get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
- WindowsAzureError, _parse_response, _convert_class_to_xml,
- _parse_response_for_dict, _parse_response_for_dict_prefix,
- _parse_response_for_dict_filter,
- _parse_enum_results_list, _update_request_uri_query_local_storage,
- _parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
+from azure.storage import (StorageServiceProperties,
+ _convert_entity_to_xml,
+ _convert_response_to_entity,
+ _convert_table_to_xml,
+ _convert_xml_to_entity,
+ _convert_xml_to_table,
+ _sign_storage_table_request,
+ _update_storage_table_header,
+ )
+from azure.storage.storageclient import _StorageClient
class TableService(_StorageClient):
'''
This is the main class managing Table resources.
- account_name: your storage account name, required for all operations.
- account_key: your storage account key, required for all operations.
'''
def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = TABLE_SERVICE_HOST_BASE, dev_host = DEV_TABLE_HOST):
+ '''
+ account_name: Your storage account name, required for all operations.
+ account_key: Your storage account key, required for all operations.
+ protocol: Optional. Protocol. Defaults to http.
+ host_base:
+ Optional. Live host base URL. Defaults to the Azure URL. Override
+ this for on-premises deployments.
+ dev_host: Optional. Dev host URL. Defaults to localhost.
+ '''
return super(TableService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
def begin_batch(self):
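
For illustration, constructing TableService against the default endpoint and against a custom host_base as the new __init__ docstring describes (credentials and the on-premises host suffix are placeholders):

    from azure.storage.tableservice import TableService

    # Default: host_base is the public '.table.core.windows.net' suffix.
    table_service = TableService(account_name='myaccount', account_key='mykey')

    # On-premises / custom endpoint: override host_base (and protocol if needed).
    onprem_service = TableService(account_name='myaccount', account_key='mykey',
                                  protocol='https',
                                  host_base='.table.mycompany.example')
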
@@ -61,8 +77,8 @@ def cancel_batch(self):
def get_table_service_properties(self):
'''
- Gets the properties of a storage account's Table service, including Windows Azure
- Storage Analytics.
+ Gets the properties of a storage account's Table service, including
+ Windows Azure Storage Analytics.
'''
request = HTTPRequest()
request.method = 'GET'
@@ -76,9 +92,10 @@ def get_table_service_properties(self):
def set_table_service_properties(self, storage_service_properties):
'''
- Sets the properties of a storage account's Table Service, including Windows Azure Storage Analytics.
+ Sets the properties of a storage account's Table Service, including
+ Windows Azure Storage Analytics.
- storage_service_properties: a StorageServiceProperties object.
+ storage_service_properties: StorageServiceProperties object.
'''
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
@@ -96,8 +113,11 @@ def query_tables(self, table_name = None, top=None, next_table_name=None):
'''
Returns a list of tables under the specified account.
- table_name: optional, the specific table to query
- top: the maximum number of tables to return
+ table_name: Optional. The specific table to query.
+ top: Optional. Maximum number of tables to return.
+ next_table_name:
+ Optional. When top is used, the next table name is stored in
+ result.x_ms_continuation['NextTableName']
'''
request = HTTPRequest()
request.method = 'GET'
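
A sketch of paging with the continuation token described above; whether x_ms_continuation is present (as a plain dict) on a result with no further pages is an assumption here, hence the .get():

    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    # First page of at most 5 tables.
    first_page = table_service.query_tables(top=5)

    # If more tables remain, the continuation token is surfaced on the result;
    # pass it back to fetch the next page.
    next_table_name = first_page.x_ms_continuation.get('NextTableName')
    if next_table_name:
        second_page = table_service.query_tables(top=5,
                                                 next_table_name=next_table_name)
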
@@ -121,17 +141,18 @@ def create_table(self, table, fail_on_exist=False):
'''
Creates a new table in the storage account.
- table: name of the table to create. Table name may contain only alphanumeric characters
- and cannot begin with a numeric character. It is case-insensitive and must be from
- 3 to 63 characters long.
- fail_on_exist: specify whether throw exception when table exists.
+ table:
+ Name of the table to create. Table name may contain only
+ alphanumeric characters and cannot begin with a numeric character.
+ It is case-insensitive and must be from 3 to 63 characters long.
+ fail_on_exist: Specify whether to throw an exception when the table exists.
'''
_validate_not_none('table', table)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/Tables'
- request.body = _get_request_body(convert_table_to_xml(table))
+ request.body = _get_request_body(_convert_table_to_xml(table))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
if not fail_on_exist:
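
A sketch of the two fail_on_exist behaviours documented above (table name and credentials are placeholders; the conflict is assumed to surface as WindowsAzureConflictError):

    from azure import WindowsAzureConflictError
    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    # fail_on_exist=False (the default): returns False instead of raising
    # when the table already exists.
    created = table_service.create_table('tasktable')

    # fail_on_exist=True: a second create raises a conflict error instead.
    try:
        table_service.create_table('tasktable', fail_on_exist=True)
    except WindowsAzureConflictError:
        pass  # the table was already there
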
@@ -147,9 +168,9 @@ def create_table(self, table, fail_on_exist=False):
def delete_table(self, table_name, fail_not_exist=False):
'''
- table_name: name of the table to delete.
-
- fail_not_exist: specify whether throw exception when table doesn't exist.
+ table_name: Name of the table to delete.
+ fail_not_exist:
+ Specify whether to throw an exception when the table doesn't exist.
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
@@ -175,7 +196,7 @@ def get_entity(self, table_name, partition_key, row_key, select=''):
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- select: the property names to select.
+ select: Property names to select.
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
@@ -195,10 +216,18 @@ def query_entities(self, table_name, filter=None, select=None, top=None, next_pa
'''
Get entities in a table; includes the $filter and $select options.
- table_name: the table to query
- filter: a filter as described at http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
- select: the property names to select from the entities
- top: the maximum number of entities to return
+ table_name: Table to query.
+ filter:
+ Optional. Filter as described at
+ http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
+ select: Optional. Property names to select from the entities.
+ top: Optional. Maximum number of entities to return.
+ next_partition_key:
+ Optional. When top is used, the next partition key is stored in
+ result.x_ms_continuation['NextPartitionKey']
+ next_row_key:
+ Optional. When top is used, the next row key is stored in
+ result.x_ms_continuation['NextRowKey']
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
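
A sketch of query_entities with the filter/select options and the continuation keys described above; the table name and property names are placeholders, and the presence of x_ms_continuation on the result is assumed:

    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    # filter and select use the OData syntax linked in the docstring.
    entities = table_service.query_entities(
        'tasktable',
        filter="PartitionKey eq 'batch-001' and Priority gt 2",
        select='RowKey,Description',
        top=100)

    # When top is hit, continue from the returned continuation keys.
    continuation = entities.x_ms_continuation
    if continuation.get('NextPartitionKey'):
        more = table_service.query_entities(
            'tasktable',
            top=100,
            next_partition_key=continuation['NextPartitionKey'],
            next_row_key=continuation['NextRowKey'])
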
@@ -222,8 +251,11 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
'''
Inserts a new entity into a table.
- entity: Required. The entity object to insert. Could be a dict format or entity object.
- Content-Type: this is required and has to be set to application/atom+xml
+ table_name: Table name.
+ entity:
+ Required. The entity object to insert. Could be a dict format or
+ entity object.
+ content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('entity', entity)
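
A sketch of insert_entity with the dict form of an entity mentioned in the docstring (table and property names are placeholders; PartitionKey and RowKey are the mandatory entity keys):

    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    task = {'PartitionKey': 'batch-001',
            'RowKey': '42',
            'Description': 'process item 42',
            'Priority': 2}
    table_service.insert_entity('tasktable', task)
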
@@ -233,7 +265,7 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
request.host = self._get_host()
request.path = '/' + _str(table_name) + ''
request.headers = [('Content-Type', _str_or_none(content_type))]
- request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
@@ -242,13 +274,20 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
def update_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
- Updates an existing entity in a table. The Update Entity operation replaces the entire
- entity and can be used to remove properties.
+ Updates an existing entity in a table. The Update Entity operation
+ replaces the entire entity and can be used to remove properties.
- entity: Required. The entity object to insert. Could be a dict format or entity object.
+ table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- Content-Type: this is required and has to be set to application/atom+xml
+ entity:
+ Required. The entity object to update. Could be a dict format or
+ entity object.
+ content_type: Required. Must be set to application/atom+xml
+ if_match:
+ Optional. Specifies the condition for which the update should be
+ performed. To force an unconditional update, set to the wildcard
+ character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
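
A sketch of update_entity, unconditionally and with an if_match condition as documented above (the etag shown is a made-up placeholder; a real one would come from an earlier read or write of the entity):

    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    replacement = {'PartitionKey': 'batch-001',
                   'RowKey': '42',
                   'Description': 'process item 42 (rescheduled)'}

    # Replace the whole entity unconditionally (if_match defaults to '*').
    table_service.update_entity('tasktable', 'batch-001', '42', replacement)

    # Replace only if the entity is unchanged since it was read, by passing
    # the etag obtained earlier.
    etag = 'W/"datetime\'2013-07-08T00%3A00%3A00Z\'"'
    table_service.update_entity('tasktable', 'batch-001', '42', replacement,
                                if_match=etag)
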
@@ -263,7 +302,7 @@ def update_entity(self, table_name, partition_key, row_key, entity, content_type
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
- request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
@@ -272,13 +311,21 @@ def update_entity(self, table_name, partition_key, row_key, entity, content_type
def merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
- Updates an existing entity by updating the entity's properties. This operation does
- not replace the existing entity as the Update Entity operation does.
+ Updates an existing entity by updating the entity's properties. This
+ operation does not replace the existing entity as the Update Entity
+ operation does.
- entity: Required. The entity object to insert. Can be a dict format or entity object.
+ table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- Content-Type: this is required and has to be set to application/atom+xml
+ entity:
+ Required. The entity object to merge. Could be a dict format or
+ entity object.
+ content_type: Required. Must be set to application/atom+xml
+ if_match:
+ Optional. Specifies the condition for which the merge should be
+ performed. To force an unconditional merge, set to the wildcard
+ character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
@@ -293,7 +340,7 @@ def merge_entity(self, table_name, partition_key, row_key, entity, content_type=
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
- request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
@@ -304,11 +351,14 @@ def delete_entity(self, table_name, partition_key, row_key, content_type='applic
'''
Deletes an existing entity in a table.
+ table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- if_match: Required. Specifies the condition for which the delete should be performed.
- To force an unconditional delete, set If-Match to the wildcard character (*).
- Content-Type: this is required and has to be set to application/atom+xml
+ content_type: Required. Must be set to application/atom+xml
+ if_match:
+ Optional. Specifies the condition for which the delete should be
+ performed. To force an unconditional delete, set to the wildcard
+ character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
@@ -329,14 +379,17 @@ def delete_entity(self, table_name, partition_key, row_key, content_type='applic
def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
'''
- Replaces an existing entity or inserts a new entity if it does not exist in the table.
- Because this operation can insert or update an entity, it is also known as an "upsert"
- operation.
+ Replaces an existing entity or inserts a new entity if it does not
+ exist in the table. Because this operation can insert or update an
+ entity, it is also known as an "upsert" operation.
- entity: Required. The entity object to insert. Could be a dict format or entity object.
+ table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- Content-Type: this is required and has to be set to application/atom+xml
+ entity:
+ Required. The entity object to insert. Could be a dict format or
+ entity object.
+ content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
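
A sketch of the "upsert" behaviour documented above: insert_or_replace_entity creates the entity when it is missing and replaces it when it exists, so no etag or if_match handling is needed (names and credentials are placeholders):

    from azure.storage.tableservice import TableService

    table_service = TableService(account_name='myaccount', account_key='mykey')

    table_service.insert_or_replace_entity(
        'tasktable', 'batch-001', '42',
        {'PartitionKey': 'batch-001', 'RowKey': '42', 'Priority': 5})
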
@@ -348,7 +401,7 @@ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, c
request.host = self._get_host()
request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
- request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
@@ -357,14 +410,17 @@ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, c
def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
'''
- Merges an existing entity or inserts a new entity if it does not exist in the table.
- Because this operation can insert or update an entity, it is also known as an "upsert"
- operation.
+ Merges an existing entity or inserts a new entity if it does not exist
+ in the table. Because this operation can insert or update an entity,
+ it is also known as an "upsert" operation.
- entity: Required. The entity object to insert. Could be a dict format or entity object.
+ table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
- Content-Type: this is required and has to be set to application/atom+xml
+ entity:
+ Required. The entity object to insert. Could be a dict format or
+ entity object.
+ content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
@@ -376,14 +432,13 @@ def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, con
request.host = self._get_host()
request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
- request.body = _get_request_body(convert_entity_to_xml(entity))
+ request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['etag'])
-
def _perform_request_worker(self, request):
auth = _sign_storage_table_request(request,
self.account_name,
diff --git a/src/codegenerator/blob_input.txt b/src/codegenerator/blob_input.txt
deleted file mode 100644
index 55b52f616939..000000000000
--- a/src/codegenerator/blob_input.txt
+++ /dev/null
@@ -1,559 +0,0 @@
-[class]
-BlobService
-[x-ms-version]
-2011-08-18
-[class-comment]
-This is the main class managing Blob resources.
-account_name: your storage account name, required for all operations.
-account_key: your storage account key, required for all operations.
-[init]
-account_name
-account_key
-
-[method]
-list_containers
-[comment]
-The List Containers operation returns a list of the containers under the specified account.
-
-prefix: Optional. Filters the results to return only containers whose names begin with
- the specified prefix.
-marker: Optional. A string value that identifies the portion of the list to be returned
- with the next list operation.
-maxresults: Optional. Specifies the maximum number of containers to return.
-include: Optional. Include this parameter to specify that the container's metadata be
- returned as part of the response body. set this parameter to string 'metadata' to
- get container's metadata.
-[return]
-ContainerEnumResults
-[url]
-GET http://.blob.core.windows.net/?comp=list
-[query]
-prefix=
-marker=
-maxresults=
-include=
-
-[method]
-create_container
-[params]
-fail_on_exist=False
-[comment]
-Creates a new container under the specified account. If the container with the same name
-already exists, the operation fails.
-
-x_ms_meta_name_values: Optional. A dict with name_value pairs to associate with the
- container as metadata. Example:{'Category':'test'}
-x_ms_blob_public_access: Optional. Possible values include: container, blob.
-fail_on_exist: specify whether to throw an exception when the container exists.
-[return]
-None
-[url]
-PUT http://.blob.core.windows.net/?restype=container
-[requestheader]
-x-ms-meta-name-values=
-x-ms-blob-public-access=
-
-[method]
-get_container_properties
-[comment]
-Returns all user-defined metadata and system properties for the specified container.
-[return]
-dict
-[url]
-GET http://.blob.core.windows.net/?restype=container
-
-[method]
-get_container_metadata
-[comment]
-Returns all user-defined metadata for the specified container. The metadata will be
-in returned dictionary['x-ms-meta-(name)'].
-[return]
-dict
-[url]
-GET http://.blob.core.windows.net/?restype=container&comp=metadata
-
-[method]
-set_container_metadata
-[comment]
-Sets one or more user-defined name-value pairs for the specified container.
-
-x_ms_meta_name_values: A dict containing name, value for metadata. Example: {'category':'test'}
-[return]
-[url]
-PUT http://.blob.core.windows.net/?restype=container&comp=metadata
-[requestheader]
-x-ms-meta-name-values=
-
-[method]
-get_container_acl
-[comment]
-Gets the permissions for the specified container.
-[return]
-SignedIdentifiers
-[url]
-GET http://.blob.core.windows.net/?restype=container&comp=acl
-
-[method]
-set_container_acl
-[comment]
-Sets the permissions for the specified container.
-
-x_ms_blob_public_access: Optional. Possible values include 'container' and 'blob'.
-signed_identifiers: SignedIdentifers instance
-[return]
-[url]
-PUT http://.blob.core.windows.net/?restype=container&comp=acl
-[requestheader]
-x-ms-blob-public-access=
-[requestbody]
-class:signed_identifiers;
-
-[method]
-delete_container
-[params]
-fail_not_exist=False
-[comment]
-Marks the specified container for deletion.
-
-fail_not_exist: specify whether to throw an exception when the container doesn't exist.
-[return]
-None
-[url]
-DELETE http://.blob.core.windows.net/?restype=container
-
-[method]
-list_blobs
-[comment]
-Returns the list of blobs under the specified container.
-[return]
-BlobEnumResults
-[url]
-GET http://.blob.core.windows.net/?restype=container&comp=list
-[query]
-prefix=
-marker=
-maxresults=
-include=
-
-[method]
-set_blob_service_properties
-[comment]
-Sets the properties of a storage account's Blob service, including Windows Azure
-Storage Analytics. You can also use this operation to set the default request
-version for all incoming requests that do not have a version specified.
-
-storage_service_properties: a StorageServiceProperties object.
-timeout: Optional. The timeout parameter is expressed in seconds. For example, the
- following value sets a timeout of 30 seconds for the request: timeout=30.
-[return]
-[url]
-PUT http://.blob.core.windows.net/?restype=service&comp=properties
-[query]
-timeout=
-[requestbody]
-class:storage_service_properties;required
-
-[method]
-get_blob_service_properties
-[comment]
-Gets the properties of a storage account's Blob service, including Windows Azure
-Storage Analytics.
-
-timeout: Optional. The timeout parameter is expressed in seconds. For example, the
- following value sets a timeout of 30 seconds for the request: timeout=30.
-[return]
-StorageServiceProperties
-[url]
-GET http://.blob.core.windows.net/?restype=service&comp=properties
-[query]
-timeout=
-
-[method]
-get_blob_properties
-[comment]
-Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
-
-x_ms_lease_id: Required if the blob has an active lease.
-[return]
-dict
-[url]
-HEAD http://myaccount.blob.core.windows.net//
-[requestheader]
-x-ms-lease-id=
-
-[method]
-set_blob_properties
-[comment]
-Sets system properties on the blob.
-
-x_ms_blob_cache_control: Optional. Modifies the cache control string for the blob.
-x_ms_blob_content_type: Optional. Sets the blob's content type.
-x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
-x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
-x_ms_blob_content_language: Optional. Sets the blob's content language.
-x_ms_lease_id: Required if the blob has an active lease.
-[return]
-[url]
-PUT http://myaccount.blob.core.windows.net//?comp=properties
-[requestheader]
-x-ms-blob-cache-control=
-x-ms-blob-content-type=
-x-ms-blob-content-md5=
-x-ms-blob-content-encoding=
-x-ms-blob-content-language=
-x-ms-lease-id=
-
-[method]
-put_blob
-[comment]
-Creates a new block blob or page blob, or updates the content of an existing block blob.
-
-container_name: the name of container to put the blob
-blob_name: the name of blob
-x_ms_blob_type: Required. Could be BlockBlob or PageBlob
-x_ms_meta_name_values: A dict containing name, value for metadata.
-x_ms_lease_id: Required if the blob has an active lease.
-blob: the content of blob.
-[return]
-[url]
-PUT http://.blob.core.windows.net//
-[requestheader]
-x-ms-blob-type=;required
-Content-Encoding=
-Content-Language=
-Content-MD5=
-Cache-Control=
-x-ms-blob-content-type=
-x-ms-blob-content-encoding=
-x-ms-blob-content-language=
-x-ms-blob-content-md5=
-x-ms-blob-cache-control=
-x-ms-meta-name-values=;
-x-ms-lease-id=
-x-ms-blob-content-length=
-x-ms-blob-sequence-number=
-[requestbody]
-binary:blob;required
-
-[method]
-get_blob
-[comment]
-Reads or downloads a blob from the system, including its metadata and properties.
-
-container_name: the name of container to get the blob
-blob_name: the name of blob
-x_ms_range: Optional. Return only the bytes of the blob in the specified range.
-[return]
-BlobResult
-[url]
-GET http://.blob.core.windows.net//
-[query]
-snapshot=
-[requestheader]
-x-ms-range=
-x-ms-lease-id=
-x-ms-range-get-content-md5=
-
-[method]
-get_blob_metadata
-[comment]
-Returns all user-defined metadata for the specified blob or snapshot.
-
-container_name: the name of container containing the blob.
-blob_name: the name of blob to get metadata.
-[return]
-dict
-prefix='x-ms-meta'
-[url]
-GET http://.blob.core.windows.net//?comp=metadata
-[query]
-snapshot=
-[requestheader]
-x-ms-lease-id=
-
-[method]
-set_blob_metadata
-[comment]
-Sets user-defined metadata for the specified blob as one or more name-value pairs.
-
-container_name: the name of container containing the blob
-blob_name: the name of blob
-x_ms_meta_name_values: Dict containing name and value pairs.
-[return]
-[url]
-PUT http://.blob.core.windows.net//?comp=metadata
-[requestheader]
-x-ms-meta-name-values=
-x-ms-lease-id=
-
-[method]
-lease_blob
-[comment]
-Establishes and manages a one-minute lock on a blob for write operations.
-
-container_name: the name of container.
-blob_name: the name of blob
-x_ms_lease_id: Any GUID format string
-x_ms_lease_action: Required. Possible values: acquire|renew|release|break
-[return]
-dict
-filter=['x-ms-lease-id']
-[url]
-PUT http://.blob.core.windows.net//?comp=lease
-[requestheader]
-x-ms-lease-id=
-x-ms-lease-action=;required:acquire|renew|release|break
-
-[method]
-snapshot_blob
-[comment]
-Creates a read-only snapshot of a blob.
-
-container_name: the name of container.
-blob_name: the name of blob
-x_ms_meta_name_values: Optional. Dict containing name and value pairs.
-if_modified_since: Optional. Datetime string.
-if_unmodified_since: DateTime string.
-if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
-if_none_match: Optional. An ETag value
-x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
-[return]
-dict
-filter=['x-ms-snapshot', 'etag', 'last-modified']
-[url]
-PUT http://.blob.core.windows.net//?comp=snapshot
-[query]
-[requestheader]
-x-ms-meta-name-values=
-If-Modified-Since=
-If-Unmodified-Since=
-If-Match=
-If-None-Match=
-x-ms-lease-id=
-[requestbody]
-
-[method]
-copy_blob
-[comment]
-Copies a blob to a destination within the storage account.
-
-container_name: the name of container.
-blob_name: the name of blob
-x_ms_copy_source: the blob to be copied. Should be absolute path format.
-x_ms_meta_name_values: Optional. Dict containing name and value pairs.
-x_ms_source_if_modified_since: Optional. An ETag value. Specify this conditional
- header to copy the source blob only if its ETag matches the value specified.
-x_ms_source_if_unmodified_since: Optional. An ETag value. Specify this conditional
- header to copy the blob only if its ETag does not match the value specified.
-x_ms_source_if_match: Optional. A DateTime value. Specify this conditional header to copy
- the blob only if the source blob has been modified since the specified date/time.
-x_ms_source_if_none_match: Optional. An ETag value. Specify this conditional header to
- copy the source blob only if its ETag matches the value specified.
-if_modified_since: Optional. Datetime string.
-if_unmodified_since: DateTime string.
-if_match: Optional. snapshot the blob only if its ETag value matches the value specified.
-if_none_match: Optional. An ETag value
-x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
-[return]
-[url]
-PUT http://.blob.core.windows.net//
-[query]
-[requestheader]
-x-ms-copy-source=;required
-x-ms-meta-name-values=;# a dict containing name, value for metadata.
-x-ms-source-if-modified-since=
-x-ms-source-if-unmodified-since=
-x-ms-source-if-match=
-x-ms-source-if-none-match=
-If-Modified-Since=
-If-Unmodified-Since=
-If-Match=
-If-None-Match=
-x-ms-lease-id=
-x-ms-source-lease-id=
-[requestbody]
-
-[method]
-delete_blob
-[comment]
-Marks the specified blob or snapshot for deletion. The blob is later deleted
-during garbage collection.
-
-To mark a specific snapshot for deletion provide the date/time of the snapshot via
-the snapshot parameter.
-
-container_name: the name of container.
-blob_name: the name of blob
-x_ms_lease_id: Optional. If this header is specified, the operation will be performed
- only if both of the following conditions are met.
- 1. The blob's lease is currently active
- 2. The lease ID specified in the request matches that of the blob.
-[return]
-[url]
-DELETE http://.blob.core.windows.net//
-[query]
-snapshot=
-[requestheader]
-x-ms-lease-id=
-[requestbody]
-
-[method]
-put_block
-[comment]
-Creates a new block to be committed as part of a blob.
-
-container_name: the name of the container.
-blob_name: the name of the blob
-content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
- the integrity of the blob during transport. When this header is specified,
- the storage service checks the hash that has arrived with the one that was sent.
-x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
- a blob with an active lease, specify the valid lease ID for this header.
-[return]
-[url]
-PUT http://.blob.core.windows.net//?comp=block
-[query]
-blockid=;required:base64
-[requestheader]
-Content-MD5=
-x-ms-lease-id=
-[requestbody]
-binary:block;required
-
-[method]
-put_block_list
-[comment]
-Writes a blob by specifying the list of block IDs that make up the blob. In order to
-be written as part of a blob, a block must have been successfully written to the server
-in a prior Put Block (REST API) operation.
-
-container_name: the name of container.
-blob_name: the name of blob
-x_ms_meta_name_values: Optional. Dict containing name and value pairs.
-x_ms_blob_cache_control: Optional. Sets the blob's cache control. If specified, this
- property is stored with the blob and returned with a read request.
-x_ms_blob_content_type: Optional. Sets the blob's content type. If specified, this
- property is stored with the blob and returned with a read request.
-x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. If specified,
- this property is stored with the blob and returned with a read request.
-x_ms_blob_content_language: Optional. Set the blob's content language. If specified,
- this property is stored with the blob and returned with a read request.
-x_ms_blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash
- is not validated, as the hashes for the individual blocks were validated when
- each was uploaded.
-content_md5: Optional. An MD5 hash of the block content. This hash is used to verify
- the integrity of the blob during transport. When this header is specified,
- the storage service checks the hash that has arrived with the one that was sent.
-x_ms_lease_id: Required if the blob has an active lease. To perform this operation on
- a blob with an active lease, specify the valid lease ID for this header.
-[return]
-[url]
-PUT http://.blob.core.windows.net//?comp=blocklist
-[requestheader]
-Content-MD5=
-x-ms-blob-cache-control=
-x-ms-blob-content-type=
-x-ms-blob-content-encoding=
-x-ms-blob-content-language=
-x-ms-blob-content-md5=
-x-ms-meta-name-values=;# a dict containing name, value for metadata.
-x-ms-lease-id=
-[requestbody]
-class:block_list;required
-
-
-[method]
-get_block_list
-[comment]
-Retrieves the list of blocks that have been uploaded as part of a block blob.
-
-container_name: the name of container.
-blob_name: the name of blob
-snapshot: Optional. Datetime to determine the time to retrieve the blocks.
-blocklisttype: Specifies whether to return the list of committed blocks, the
- list of uncommitted blocks, or both lists together. Valid values are
- committed, uncommitted, or all.
-[return]
-BlobBlockList
-[url]
-GET http://.blob.core.windows.net//?comp=blocklist
-[query]
-snapshot=
-blocklisttype=
-[requestheader]
-x-ms-lease-id=
-
-[method]
-put_page
-[comment]
-Writes a range of pages to a page blob.
-
-container_name: the name of container.
-blob_name: the name of blob
-timeout: the timeout parameter is expressed in seconds.
-x_ms_range: Required. Specifies the range of bytes to be written as a page. Both the start
- and end of the range must be specified. Must be in format: bytes=startByte-endByte.
- Given that pages must be aligned with 512-byte boundaries, the start offset must be
- a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
- byte ranges are 0-511, 512-1023, etc.
-x_ms_page_write: Required. You may specify one of the following options :
- 1. update(lower case): Writes the bytes specified by the request body into the specified
- range. The Range and Content-Length headers must match to perform the update.
- 2. clear(lower case): Clears the specified range and releases the space used in storage
- for that range. To clear a range, set the Content-Length header to zero, and the Range
- header to a value that indicates the range to clear, up to maximum blob size.
-x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
- with an active lease, specify the valid lease ID for this header.
-[return]
-[url]
-PUT http://.blob.core.windows.net//?comp=page
-[requestheader]
-x-ms-range=;required
-Content-MD5=
-x-ms-page-write=;required:update|clear
-x-ms-lease-id=
-x-ms-if-sequence-number-lte=
-x-ms-if-sequence-number-lt=
-x-ms-if-sequence-number-eq=
-If-Modified-Since=
-If-Unmodified-Since=
-If-Match=
-If-None-Match=
-[query]
-timeout=
-[requestbody]
-binary:page;required
-
-[method]
-get_page_ranges
-[comment]
-Retrieves the page ranges for a blob.
-
-container_name: the name of container.
-blob_name: the name of blob
-_ms_range: Optional. Specifies the range of bytes to be written as a page. Both the start
- and end of the range must be specified. Must be in format: bytes=startByte-endByte.
- Given that pages must be aligned with 512-byte boundaries, the start offset must be
- a modulus of 512 and the end offset must be a modulus of 512-1. Examples of valid
- byte ranges are 0-511, 512-1023, etc.
-x_ms_lease_id: Required if the blob has an active lease. To perform this operation on a blob
- with an active lease, specify the valid lease ID for this header.
-[return]
-PageList
-[url]
-GET http://.blob.core.windows.net//?comp=pagelist
-[query]
-snapshot=
-[requestheader]
-Range=
-x-ms-range=
-x-ms-lease-id=
-
-[end]
-
diff --git a/src/codegenerator/codegenerator.py b/src/codegenerator/codegenerator.py
deleted file mode 100644
index e6d5c7b04687..000000000000
--- a/src/codegenerator/codegenerator.py
+++ /dev/null
@@ -1,735 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-
-# To Run: C:\Python27\python.exe codegenerator.py
-# It expects the souce files to live in ..\azure\...
-
-from xml.dom import minidom
-import urllib2
-
-BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
-QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
-TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
-SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
-
-def to_legalname(name):
- """Converts the name of a header value into a value which is a valid Python
- attribute name."""
- if name == 'IncludeAPIs':
- return 'include_apis'
- if name[0] == '$':
- return name[1:]
- name = name.split('=')[0]
- if ':' in name:
- name = name.split(':')[1]
- name = name.replace('-', '_')
- legalname = name[0]
- for ch in name[1:]:
- if ch.isupper():
- legalname += '_'
- legalname += ch
- legalname = legalname.replace('__', '_').lower().replace('_m_d5', '_md5')
- return legalname
-
-def normalize_xml(xmlstr):
- if xmlstr:
- xmlstr = '>'.join(xml.strip() for xml in xmlstr.split('>'))
- xmlstr = '<'.join(xml.strip() for xml in xmlstr.split('<'))
- return xmlstr
-
-def to_multilines(statements):
- ret = statements.replace('\n', ' \\\n').strip()
- if ret.endswith(' \\'):
- ret = ret[:-2]
- return ret
-
-def get_output_str(name, value, validate_string):
- name = to_legalname(name)
- if value:
- return ''.join([name, '=\'', value, '\''])
- elif 'required' in validate_string:
- return name
- else:
- return name + '=None'
-
-def get_value_validates_comment(value_string):
- value = ''
- validate_string = ''
- comment = ''
- if ';' in value_string:
- value, value_string = value_string.split(';')[:2]
- if '#' in value_string:
- validate_string, comments = value_string.split('#')[:2]
- else:
- validate_string = value_string
- return value, validate_string, comment
-
-
-def output_import(output_file, class_name):
- indent = ' '
- output_str = 'import base64\n'
- output_str += 'import os\n'
- output_str += 'import urllib2\n\n'
-
- if 'ServiceBus' in class_name:
- output_str += 'from azure.http.httpclient import _HTTPClient\n'
- output_str += 'from azure.http import HTTPError\n'
- output_str += 'from azure.servicebus import (_update_service_bus_header, _create_message, \n'
- output_str += indent*8 + 'convert_topic_to_xml, _convert_response_to_topic, \n'
- output_str += indent*8 + 'convert_queue_to_xml, _convert_response_to_queue, \n'
- output_str += indent*8 + 'convert_subscription_to_xml, _convert_response_to_subscription, \n'
- output_str += indent*8 + 'convert_rule_to_xml, _convert_response_to_rule, \n'
- output_str += indent*8 + '_convert_xml_to_queue, _convert_xml_to_topic, \n'
- output_str += indent*8 + '_convert_xml_to_subscription, _convert_xml_to_rule,\n'
- output_str += indent*8 + '_service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE, \n'
- output_str += indent*8 + 'AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)\n'
- else:
- output_str += 'from azure.storage import *\n'
- output_str += 'from azure.storage.storageclient import _StorageClient\n'
- if 'Blob' in class_name:
- output_str += 'from azure.storage import (_update_storage_blob_header, _create_blob_result,\n'
- output_str += indent*8 + 'convert_block_list_to_xml, convert_response_to_block_list) \n'
- elif 'Queue' in class_name:
- output_str += 'from azure.storage import (_update_storage_queue_header)\n'
- else:
- output_str += 'from azure.storage import (_update_storage_table_header, \n'
- output_str += indent*8 + 'convert_table_to_xml, _convert_xml_to_table,\n'
- output_str += indent*8 + 'convert_entity_to_xml, _convert_response_to_entity, \n'
- output_str += indent*8 + '_convert_xml_to_entity, _sign_storage_table_request)\n'
-
- if 'Table' in class_name:
- output_str += 'from azure.http.batchclient import _BatchClient\n'
- output_str += 'from azure.http import HTTPRequest\n'
- output_str += 'from azure import (_validate_not_none, Feed,\n'
- output_str += indent*8 + '_convert_response_to_feeds, _str_or_none, _int_or_none,\n'
- output_str += indent*8 + '_get_request_body, _update_request_uri_query, \n'
- output_str += indent*8 + '_dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError, \n'
- output_str += indent*8 + 'WindowsAzureError, _parse_response, _convert_class_to_xml, \n'
- output_str += indent*8 + '_parse_response_for_dict, _parse_response_for_dict_prefix, \n'
- output_str += indent*8 + '_parse_response_for_dict_filter, \n'
- output_str += indent*8 + '_parse_enum_results_list, _update_request_uri_query_local_storage, \n'
- output_str += indent*8 + '_get_table_host, _get_queue_host, _get_blob_host, \n'
- output_str += indent*8 + '_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape) \n\n'
-
- output_file.write(output_str)
-
-
-def output_class(output_file, class_name, class_comment, class_init_params, x_ms_version):
- indent = ' '
-
- if 'ServiceBus' in class_name:
- output_str = ''.join(['class ', class_name, ':\n'])
- else:
- output_str = ''.join(['class ', class_name, '(_StorageClient):\n'])
- if class_comment.strip():
- output_str += ''.join([indent, '\'\'\'\n', indent, class_comment.strip(), '\n', indent, '\'\'\'\n\n'])
- else:
- output_str += '\n'
-
- if 'Table' in class_name:
- output_str += ''.join([indent, 'def begin_batch(self):\n'])
- output_str += indent*2 + 'if self._batchclient is None:\n'
- output_str += indent*3 + 'self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)\n'
- output_str += ''.join([indent*2, 'return self._batchclient.begin_batch()\n\n'])
- output_str += ''.join([indent, 'def commit_batch(self):\n'])
- output_str += ''.join([indent*2, 'try:\n'])
- output_str += ''.join([indent*3, 'ret = self._batchclient.commit_batch()\n'])
- output_str += ''.join([indent*2, 'finally:\n'])
- output_str += indent*3 + 'self._batchclient = None\n'
- output_str += ''.join([indent*2, 'return ret\n\n'])
- output_str += ''.join([indent, 'def cancel_batch(self):\n'])
- output_str += indent*2 + 'self._batchclient = None\n\n'
-
- if not 'ServiceBus' in class_name:
- output_file.write(output_str)
- return
-
- if not 'service_namespace' in class_init_params:
- output_str += ''.join([indent, 'def begin_batch(self):\n'])
- output_str += ''.join([indent*2, 'self._httpclient.begin_batch()\n\n'])
- output_str += ''.join([indent, 'def commit_batch(self):\n'])
- output_str += ''.join([indent*2, 'self._httpclient.commit_batch()\n\n'])
- output_str += ''.join([indent, 'def cancel_batch(self):\n'])
- output_str += ''.join([indent*2, 'self._httpclient.cancel_batch()\n\n'])
-
- output_file.write(output_str)
-
-
-def output_method_def(method_name, method_params, uri_param, req_param, req_query, req_header):
- indent = ' '
- output_def = ''.join([indent, 'def ', method_name, '(self, '])
- for param in uri_param:
- output_def += param.build_sig()
-
- params = req_param + req_query + req_header
- ordered_params = []
- for name, value, validate_string, comment in params:
- if 'required' in validate_string:
- ordered_params.append((name, value, validate_string, comment))
- for name, value, validate_string, comment in params:
- if 'required' not in validate_string:
- ordered_params.append((name, value, validate_string, comment))
- output_def += ', '.join(get_output_str(name, value, validate_string) for name, value, validate_string, comment in ordered_params)
- if output_def.endswith(', '):
- output_def = output_def[:-2]
- for name, value in method_params:
- output_def += ''.join([', ', name, '=', value])
- output_def += '):\n'
-
- return output_def
-
-
-def output_method_comments(method_comment, req_param, req_query, req_header):
- indent = ' '
- output_comments = ''
- if method_comment.strip():
- output_comments += method_comment
- for name, value, validate_string, comment in (req_param + req_query + req_header):
- if comment:
- output_comments += ''.join([indent*2, name, ': ', comment.rstrip(), '\n'])
- if output_comments.strip():
- output_comments = ''.join([indent*2, '\'\'\'\n', output_comments.rstrip(), '\n', indent*2, '\'\'\'\n'])
- return output_comments
-
-
-def output_method_validates(uri_param, req_param, req_query, req_header):
- indent = ' '
- output_validates = ''
- for param in uri_param:
- output_validates += param.get_validation(indent)
-
- for name, value, validate_string, comment in (req_param + req_query + req_header):
- if not validate_string.strip():
- continue
- validates = validate_string.split(':')
- for validate in validates:
- if 'required' in validate:
- output_validates += ''.join([indent*2, '_validate_not_none(\'', to_legalname(name), '\', ', to_legalname(name), ')\n'])
- return output_validates
-
-
-HEADER_CONVERSION = {'x-ms-meta-name-values': '%s',
- }
-QUERY_CONVERSION = {'maxresults' : '_int_or_none(%s)',
- 'timeout' : '_int_or_none(%s)',
- '$top': '_int_or_none(%s)',}
-
-def output_headers(list_name, request_list):
- return output_list(list_name, request_list, HEADER_CONVERSION)
-
-def output_query(list_name, request_list):
- return output_list(list_name, request_list, QUERY_CONVERSION)
-
-def output_list(list_name, request_list, validate_conversions):
- indent = ' '
- output_list_str = ''
-
- if len(request_list) == 1:
- output_list_str += ''.join([indent*2, list_name, ' = [('])
- for name, value, validate_string, comment in request_list:
- validated = validate_conversions.get(name, '_str_or_none(%s)') % (to_legalname(name), )
-
- if 'base64' in validate_string:
- output_list_str += ''.join(['\'', name, '\', base64.b64encode(', validated, '), '])
- else:
- output_list_str += ''.join(['\'', name, '\', ', validated, ', '])
- output_list_str = ''.join([output_list_str[:-2], ')]\n'])
- elif len(request_list) > 1:
- output_list_str += ''.join([indent*2, list_name, ' = [\n'])
- for name, value, validate_string, comment in request_list:
- validated = validate_conversions.get(name, '_str_or_none(%s)') % (to_legalname(name), )
-
- if 'base64' in validate_string:
- output_list_str += ''.join([indent*3, '(\'', name, '\', base64.b64encode(', validated, ')),\n'])
- else:
- output_list_str += ''.join([indent*3, '(\'', name, '\', ', validated, '),\n'])
- output_list_str = ''.join([output_list_str[:-2], '\n', indent*3, ']\n'])
-
- return output_list_str
-
-
-def output_method_body(method_name, return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
- indent = ' '
- output_body = ''.join([indent*2, 'request = HTTPRequest()\n'])
-
- output_body += ''.join([indent*2, 'request.method = \'', req_method, '\'\n'])
-
- if BLOB_SERVICE_HOST_BASE in req_host:
- output_body += indent*2 + 'request.host = _get_blob_host(self.account_name, self.use_local_storage)\n'
- elif QUEUE_SERVICE_HOST_BASE in req_host:
- output_body += indent*2 + 'request.host = _get_queue_host(self.account_name, self.use_local_storage)\n'
- elif TABLE_SERVICE_HOST_BASE in req_host:
- output_body += indent*2 + 'request.host = _get_table_host(self.account_name, self.use_local_storage)\n'
- else:
- output_body += indent*2 + 'request.host = self.service_namespace + SERVICE_BUS_HOST_BASE\n'
-
- req_uri = req_uri.replace('', '\' + self.subscription_id + \'')
-
- for param in uri_param:
- req_uri, extra = param.build_uri(req_uri, 2)
-
- if extra:
- output_body += extra
-
- output_body += ''.join([indent*2, 'request.path = \'', req_uri, '\'\n'])
-
- output_body += output_headers('request.headers', req_header)
- output_body += output_query('request.query', req_query)
-
- for name, value, validate_string, comment in req_param:
- if name.startswith('feed:'):
- type = name.split(':')[1]
- output_body += ''.join([indent*2, 'request.body = _get_request_body(convert_' + type + '_to_xml(', to_legalname(name), '))\n'])
- break
- elif name.startswith('class:'):
- if 'block_list' in name:
- output_body += ''.join([indent*2, 'request.body = _get_request_body(convert_block_list_to_xml(', to_legalname(name), '))\n'])
- else:
- output_body += ''.join([indent*2, 'request.body = _get_request_body(_convert_class_to_xml(', to_legalname(name), '))\n'])
- break
- elif name.startswith('binary:'):
- if 'message' in name:
- output_body += indent*2 + 'request.headers = message.add_headers(request)\n'
- output_body += ''.join([indent*2, 'request.body = _get_request_body(', to_legalname(name), '.body)\n'])
- else:
- output_body += ''.join([indent*2, 'request.body = _get_request_body(', to_legalname(name), ')\n'])
- break
- else:
-
- fromstr = ''.join([validate_string, '', name, '>'])
- if value and comment:
- fromstr = ''.join([value, ';', validate_string, '#', comment])
- elif value:
- fromstr = ''.join([value, ';', validate_string])
- elif comment:
- fromstr = ''.join([validate_string, '#', comment])
-
- tostr = ''.join(['\'', ' + xml_escape(str(', to_legalname(name), ')) + ', '\'', name, '>'])
-
- req_body = req_body.replace(fromstr, tostr)
-
- if len(req_body.strip()) > 80:
- output_body += ''.join([indent*2, 'request.body = _get_request_body(\'', to_multilines(req_body.strip()), '\')\n'])
- elif req_body.strip():
- output_body += ''.join([indent*2, 'request.body = _get_request_body(\'', req_body.strip(), '\')\n'])
- if SERVICE_BUS_HOST_BASE in req_host:
- output_body += indent*2 + 'request.path, request.query = _update_request_uri_query(request)\n'
- else:
- output_body += indent*2 + 'request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)\n'
-
-
- if 'servicebus' in req_host:
- output_body += indent*2 + 'request.headers = _update_service_bus_header(request, self.account_key, self.issuer)\n'
- elif 'table.core.windows.net' in req_host:
- output_body += indent*2 + 'request.headers = _update_storage_table_header(request)\n'
- elif 'blob.core.windows.net' in req_host:
- output_body += indent*2 + 'request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)\n'
- elif 'queue.core.windows.net' in req_host:
- output_body += indent*2 + 'request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)\n'
-
- for name, value in method_params:
- if 'fail_on_exist' in name:
- if method_name == 'create_queue' and 'queue.core' in req_host: #QueueService create_queue
- output_body += indent*2 + 'if not ' + name + ':\n'
- output_body += indent*3 + 'try:\n'
- output_body += ''.join([indent*4, 'response = self._perform_request(request)\n'])
- output_body += ''.join([indent*4, 'if response.status == 204:\n'])
- output_body += ''.join([indent*5, 'return False\n'])
- output_body += ''.join([indent*4, 'return True\n'])
- output_body += indent*3 + 'except WindowsAzureError as e:\n'
- output_body += indent*4 + '_dont_fail_on_exist(e)\n'
- output_body += indent*4 + 'return False\n'
- output_body += indent*2 + 'else:\n'
- output_body += ''.join([indent*3, 'response = self._perform_request(request)\n'])
- output_body += ''.join([indent*3, 'if response.status == 204:\n'])
- output_body += ''.join([indent*4, 'raise WindowsAzureConflictError(azure._ERROR_CONFLICT)\n'])
- output_body += ''.join([indent*3, 'return True\n\n'])
- else:
- output_body += indent*2 + 'if not ' + name + ':\n'
- output_body += indent*3 + 'try:\n'
- output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*4, 'return True\n'])
- output_body += indent*3 + 'except WindowsAzureError as e:\n'
- output_body += indent*4 + '_dont_fail_on_exist(e)\n'
- output_body += indent*4 + 'return False\n'
- output_body += indent*2 + 'else:\n'
- output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*3, 'return True\n\n'])
- break
- elif 'fail_not_exist' in name:
- output_body += indent*2 + 'if not ' + name + ':\n'
- output_body += indent*3 + 'try:\n'
- output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*4, 'return True\n'])
- output_body += indent*3 + 'except WindowsAzureError as e:\n'
- output_body += indent*4 + '_dont_fail_not_exist(e)\n'
- output_body += indent*4 + 'return False\n'
- output_body += indent*2 + 'else:\n'
- output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*3, 'return True\n\n'])
- break
- else:
- output_body += ''.join([indent*2, 'response = self._perform_request(request)\n\n'])
-
- if return_type and return_type != 'None':
- if return_type.startswith('dict'):
- return_params = return_type.split('\n')
- if len(return_params) == 1:
- output_body += indent*2 + 'return _parse_response_for_dict(response)\n\n'
- elif len(return_params) == 2:
- value = return_params[1].split('=')[1]
- if return_params[1].startswith('prefix'):
- output_body += indent*2 + 'return _parse_response_for_dict_prefix(response, prefix=' + value +')\n\n'
- elif return_params[1].startswith('filter'):
- output_body += indent*2 + 'return _parse_response_for_dict_filter(response, filter=' + value + ')\n\n'
- elif return_type.endswith('EnumResults'):
- output_body += indent*2 + 'return _parse_enum_results_list(response, ' + return_type + ', "' + return_type[:-11] + 's", ' + return_type[:-11] + ')\n\n'
- elif return_type == 'PageList':
- output_body += indent*2 + 'return _parse_simple_list(response, PageList, PageRange, "page_ranges")'
- else:
- if return_type == 'BlobResult':
- output_body += indent*2 + 'return _create_blob_result(response)\n\n'
- elif return_type == 'Message':
- output_body += indent*2 + 'return _create_message(response, self)\n\n'
- elif return_type == 'str':
- output_body += indent*2 + 'return response.body\n\n'
- elif return_type == 'BlobBlockList':
- output_body += indent*2 + 'return convert_response_to_block_list(response)\n\n'
- elif 'Feed' in return_type:
- for name in ['table', 'entity', 'topic', 'subscription', 'queue', 'rule']:
- if name +'\'),' in return_type:
- convert_func = '_convert_xml_to_' + name
- output_body += indent*2 + 'return _convert_response_to_feeds(response, ' + convert_func + ')\n\n'
- break
- elif name in return_type:
- convert_func = '_convert_response_to_' + name
- output_body += indent*2 + 'return ' + convert_func + '(response)\n\n'
- break
- else:
- output_body += indent*2 + 'return _parse_response(response, ' + return_type + ')\n\n'
-
-
- return output_body
-
-
-def output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
- indent=' '
- output_str = ''
- output_str += output_method_def(method_name, method_params, uri_param, req_param, req_query, req_header)
- output_str += output_method_comments(method_comment, req_param, req_query, req_header)
- output_str += output_method_validates(uri_param, req_param, req_query, req_header)
- output_str += output_method_body(method_name, return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
- output_file.write(output_str)
-
-
-class UriBuilder(object):
- def __init__(self, value):
- self.uri_str = value
-
- def build_sig(self):
- name = self.uri_str
- if to_legalname(name) != 'subscription_id':
- if '=' in name:
- name, value = name.split('=')
- return ''.join([to_legalname(name), '=', value, ', '])
- else:
- return ''.join([to_legalname(name), ', '])
- return ''
-
-
- def build_uri(self, req_uri, indent):
- name = self.uri_str
- return req_uri.replace('<' + name + '>', '\' + str(' + to_legalname(name) + ') + \''), ''
-
- def get_validation(self, indent):
- name = self.uri_str.split('=')[0]
- if to_legalname(name) != 'subscription_id':
- return ''.join([indent*2, '_validate_not_none(\'', to_legalname(name), '\', ', to_legalname(name), ')\n'])
-
- return ''
-
-class OptionalUriBuilder(object):
- def __init__(self, value):
- self.value = value
- colon = self.value.find(':')
- self.name = self.value[1:colon]
- self.replacement = self.value[colon+1:].replace('[' + self.name + ']', '" + ' + self.name + ' + "')
-
- def build_sig(self):
- return self.name + ' = None, '
-
- def get_validation(self, indent):
- return ''
-
- def build_uri(self, req_uri, indent):
- extra = ((' ' * indent) + 'if {name} is not None:\n' +
- (' ' * (indent+1)) + 'uri_part_{name} = "{replacement}"\n' +
- (' ' * indent) + 'else:\n' +
- (' ' * (indent+1)) + 'uri_part_{name} = ""\n').format(name=self.name, replacement=self.replacement)
-
- return req_uri.replace('<' + self.value + '>', "' + uri_part_" + self.name + " + '"), extra
-
-def auto_codegen(source_filename, output_filename='output.py'):
- source_file = open(source_filename,'r')
- output_file = open(output_filename,'w')
- return_type = None
- indent = ' '
- method_name = ''
- req_host = ''
- req_method = ''
- req_uri = ''
- req_body = ''
- req_query = []
- req_header = []
- req_param = []
- uri_param = []
- host_param = ''
- class_init_params = []
- class_name = ''
- x_ms_version = ''
- class_comment = ''
- method_comment = ''
- req_protocol = ''
- method_params = []
- methods_code = ''
-
- line = source_file.readline().strip().lower()
- while True:
- if line == '[end]':
- break
- elif line == '[class]':
- if method_name != '':
- output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
- method_name = ''
- class_name = source_file.readline().strip()
- elif line == '[x-ms-version]':
- x_ms_version = source_file.readline().strip()
- elif line == '[class-comment]':
- while True:
- line = source_file.readline().strip()
- if line.startswith('['):
- break
- else:
- class_comment += ''.join([indent, line, '\n'])
- continue
- elif line == '[init]':
- while True:
- param_name = source_file.readline().strip()
- if param_name.startswith('['):
- line = param_name.strip()
- break
- elif param_name.strip():
- class_init_params.append(param_name.strip())
- output_import(output_file, class_name)
- output_class(output_file, class_name, class_comment, class_init_params, x_ms_version)
- class_name = ''
- x_ms_version = ''
- class_init_params = []
- class_comment = ''
- continue
- elif line == '[methods_code]':
- while True:
- line = source_file.readline()
- if line.startswith('['):
- line = line.strip()
- break
- else:
- methods_code += ''.join([indent, line])
- continue
- elif line == '[method]':
- if method_name != '':
- output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
- req_query = []
- req_header = []
- req_param = []
- req_body = ''
- return_type = None
- method_comment = ''
- method_params = []
- method_name = source_file.readline().strip()
- elif line == '[params]':
- method_params = []
- while True:
- param = source_file.readline().strip()
- if param.startswith('['):
- line = param.strip()
- break
- elif param.strip():
- name, value = param.split('=')
- method_params.append((name, value))
- continue
- elif line == '[comment]':
- while True:
- line = source_file.readline()
- if line.startswith('['):
- line = line.strip()
- break
- else:
- method_comment += ''.join([indent*2, line])
- continue
- elif line == '[return]':
- return_type = ''
- while True:
- line = source_file.readline()
- if line.startswith('['):
- line = line.strip()
- break
- else:
- return_type += line
- return_type = return_type.strip()
- continue
- elif line == '[url]':
- url = source_file.readline().strip()
- if 'https://' in url:
- req_protocol = 'https'
- else:
- req_protocol = 'http'
- req_host = url.split(' ')[1].split('//')[1].split('/')[0]
- host_param = ''
- if '<' in req_host:
- pos1 = req_host.find('<')
- pos2 = req_host.find('>')
- host_param = req_host[pos1+1:pos2]
-
- req_method = url.split(' ')[0]
- req_uri = url[url.find('//')+2:].replace(req_host, '')
-
- uri_param = []
- uri_path = req_uri
- while '<' in uri_path:
- pos1 = uri_path.find('<')
- pos2 = uri_path.find('>')
- uri_param_name = uri_path[pos1+1:pos2]
-
- if uri_param_name.startswith('?'):
- builder = OptionalUriBuilder(uri_param_name)
- else:
- builder = UriBuilder(uri_param_name)
-
- uri_param.append(builder)
- if pos2 < (len(uri_path)-1):
- uri_path = uri_path[pos2+1:]
- else:
- break
- elif line == '[query]':
- req_query = []
- while True:
- query = source_file.readline().strip()
- if query.startswith('['):
- line = query.strip()
- break
- elif query.strip():
- name, value = query.split('=')
- validate_string = ''
- comment = ''
- if '#' in value:
- pos = value.rfind('#')
- comment = value[pos+1:]
- value = value[:pos]
- if ';' in value:
- value, validate_string = value.split(';')
- req_query.append((name, value, validate_string, comment))
- continue
- elif line == '[requestheader]':
- req_header = []
- while True:
- header = source_file.readline().strip()
- if header.startswith('['):
- line = header.strip()
- break
- elif header.strip():
- name, value = header.split('=')
- validate_string = ''
- comment = ''
- if '#' in value:
- pos = value.rfind('#')
- comment = value[pos+1:]
- value = value[:pos]
- if ';' in value:
- value, validate_string = value.split(';')
- req_header.append((name, value, validate_string, comment))
- continue
- elif line == '[requestbody]':
- req_body = ''
- req_param = []
- while True:
- body = source_file.readline()
- if body.startswith('['):
- line = body.strip()
- break
- elif body.strip():
- req_body += body
-
- if req_body.startswith('class:') or req_body.startswith('binary:') or req_body.startswith('feed:'):
- name_value_string = req_body.strip()
- name = ''
- value_string = ''
- if ';' in name_value_string:
- name, value_string = name_value_string.split(';')
- else:
- name = name_value_string
- value, validate_string, comment = get_value_validates_comment(value_string)
- req_param.append((name, value, validate_string, comment))
- elif req_body.strip():
- newbody = normalize_xml(req_body)
- xmldoc = minidom.parseString(newbody)
- for xmlelement in xmldoc.childNodes[0].childNodes:
- value_string = xmlelement.firstChild.nodeValue
- value, validate_string, comment = get_value_validates_comment(value_string)
- req_param.append((xmlelement.nodeName, value, validate_string, comment))
- continue
- line = source_file.readline().strip().lower()
-
- output_method(output_file, method_name, method_params, method_comment, return_type, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
-
- output_file.write('\n' + methods_code)
- source_file.close()
- output_file.close()
-
-if __name__ == '__main__':
- auto_codegen('blob_input.txt', '../azure/storage/blobservice.py')
- auto_codegen('table_input.txt', '../azure/storage/tableservice.py')
- auto_codegen('queue_input.txt', '../azure/storage/queueservice.py')
- auto_codegen('servicebus_input.txt', '../azure/servicebus/servicebusservice.py')
-
- def add_license(license_str, output_file_name):
- output_file = open(output_file_name, 'r')
- content = output_file.read()
- output_file.close()
- output_file = open(output_file_name, 'w')
- output_file.write(license_str)
- output_file.write(content)
- output_file.close()
-
- license_str = '''#-------------------------------------------------------------------------
-# Copyright (c) Microsoft. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-'''
-
- add_license(license_str, '../azure/storage/blobservice.py')
- add_license(license_str, '../azure/storage/tableservice.py')
- add_license(license_str, '../azure/storage/queueservice.py')
- add_license(license_str, '../azure/servicebus/servicebusservice.py')
\ No newline at end of file
diff --git a/src/codegenerator/codegenerator.pyproj b/src/codegenerator/codegenerator.pyproj
deleted file mode 100644
index ef749e81c078..000000000000
--- a/src/codegenerator/codegenerator.pyproj
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
- Debug
- 2.0
- {6ea33d82-ec4a-4e01-ba16-003e66b38e5b}
- .
- codegenerator.py
- C:\ptvs\Open_Source\Incubation\windowsazure
- .
- .
- codegenerator
- codegenerator
- SAK
- SAK
- SAK
- SAK
- False
- 2af0f10d-7135-4994-9156-5d01c9c11b7e
- 2.7
-
-
- true
- false
-
-
- true
- false
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/codegenerator/hostedservices_input.txt b/src/codegenerator/hostedservices_input.txt
deleted file mode 100644
index 42cd3c19e12f..000000000000
--- a/src/codegenerator/hostedservices_input.txt
+++ /dev/null
@@ -1,499 +0,0 @@
-[class]
-HostedServiceManager
-[x-ms-version]
-2011-08-18
-[init]
-cert_file
-
-[method]
-list_storage_accounts
-[return]
-StorageServices
-[url]
-GET https://management.core.windows.net//services/storageservices
-
-[method]
-get_storage_account_properties
-[return]
-StorageService
-[url]
-GET https://management.core.windows.net//services/storageservices/
-
-[method]
-get_storage_account_keys
-[return]
-StorageService
-[url]
-GET https://management.core.windows.net//services/storageservices//keys
-
-[method]
-regenerate_storage_account_keys
-[return]
-StorageService
-[url]
-POST https://management.core.windows.net//services/storageservices//keys?action=regenerate
-[requestbody]
-
-
- Primary|Secondary
-
-
-[method]
-create_storage_account
-[url]
-POST https://management.core.windows.net//services/storageservices
-[requestbody]
-
-
- service-name
- service-description
-
- affinity-group-name
- location-of-the-storage-account
-
-
-[method]
-delete_storage_account
-[url]
-DELETE https://management.core.windows.net//services/storageservices/
-
-[method]
-update_storage_account
-[url]
-PUT https://management.core.windows.net//services/storageservices/
-[requestbody]
-
-
- Description of the storage service
-
-
-
-[method]
-list_hosted_services
-[return]
-HostedServices
-[url]
-GET https://management.core.windows.net//services/hostedservices
-
-[method]
-delete_hosted_service
-[url]
-DELETE https://management.core.windows.net//services/hostedservices/
-
-[method]
-update_hosted_service
-[url]
-PUT https://management.core.windows.net//services/hostedservices/
-[requestbody]
-
-
-
- description
-
-
-[method]
-create_hosted_service
-[url]
-POST https://management.core.windows.net//services/hostedservices
-[requestbody]
-
-
- service-name
-
- description
- location
- affinity-group
-
-
-[method]
-get_hosted_service_properties
-[return]
-HostedService
-[url]
-GET https://management.core.windows.net//services/hostedservices/
-[query]
-embed-detail=false
-
-[method]
-create_deployment
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots/
-[requestbody]
-
-
- deployment-name
- package-url-in-blob-storage
-
- base64-encoded-configuration-file
- true|false
- true|false
-
-
-
-[method]
-get_deployment_by_slot
-[return]
-Deployment
-[url]
-GET https://management.core.windows.net//services/hostedservices//deploymentslots/
-
-[method]
-get_deployment_by_name
-[return]
-Deployment
-[url]
-GET https://management.core.windows.net//services/hostedservices//deployments/
-
-[method]
-swap_deployment
-[return]
-Deployment
-[url]
-POST https://management.core.windows.net//services/hostedservices/
-[requestbody]
-
-
- production-deployment-name
- deployment-name-to-be-swapped-with-production
-
-
-[method]
-delete_deployment_by_slot
-[url]
-DELETE https://management.core.windows.net//services/hostedservices//deploymentslots/
-
-[method]
-delete_deployment_by_name
-[url]
-DELETE https://management.core.windows.net//services/hostedservices//deployments/
-
-[method]
-change_deployment_configuration_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//?comp=config
-[requestbody]
-
-
- base-64-encoded-configuration-file
- true|false
- Auto|Manual
-
-
-[method]
-change_deployment_configuration_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//?comp=config
-[requestbody]
-
-
- base-64-encoded-configuration-file
- true|false
- Auto|Manual
-
-
-[method]
-update_deployment_status_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//?comp=status
-[requestbody]
-
-
- Running|Suspended
-
-
-[method]
-update_deployment_status_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//?comp=status
-[requestbody]
-
-
- Running|Suspended
-
-
-[method]
-upgrade_deployment_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//?comp=upgrade
-[requestbody]
-
-
- auto|manual
- url-to-package
- base64-encoded-config-file
-
- role-name
- true|false
-
-
-[method]
-upgrade_deployment_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//?comp=upgrade
-[requestbody]
-
-
- auto|manual
- url-to-package
- base64-encoded-config-file
-
- role-name
- true|false
-
-
-[method]
-walk_upgrade_domain_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//?comp=walkupgradedomain
-[requestbody]
-
-
- upgrade-domain-id
-
-
-[method]
-walk_upgrade_domain_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//?comp=walkupgradedomain
-[requestbody]
-
-
- upgrade-domain-id
-
-
-[method]
-reboot_role_instance_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//roleinstances/?comp=reboot
-[requestheader]
-Content-Length=0
-
-[method]
-reboot_role_instance_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//roleinstances/?comp=reboot
-[requestheader]
-Content-Length=0
-
-[method]
-reimage_role_instance_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//roleinstances/?comp=reimage
-[requestheader]
-Content-Length=0
-
-[method]
-reimage_role_instance_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//roleinstances/?comp=reimage
-[requestheader]
-Content-Length=0
-
-[method]
-rollback_update_by_slot
-[url]
-POST https://management.core.windows.net//services/hostedservices//deploymentslots//?comp=rollback
-[requestbody]
-
-
- auto|manual
- true|false
-
-
-[method]
-rollback_update_by_name
-[url]
-POST https://management.core.windows.net//services/hostedservices//deployments//?comp=rollback
-[requestbody]
-
-
- auto|manual
- true|false
-
-
-[method]
-list_certificates
-[return]
-Certificates
-[url]
-GET https://management.core.windows.net//services/hostedservices//certificates
-
-[method]
-get_certificate
-[return]
-Certificate
-[url]
-GET https://management.core.windows.net//services/hostedservices//certificates/
-
-[method]
-add_certificate
-[return]
-Certificates
-[url]
-POST https://management.core.windows.net//services/hostedservices//certificates
-[requestbody]
-
-
- base64-encoded-pfx-file
- pfx
- pfx-file-password
-
-
-[method]
-delete_certificate
-[return]
-Certificates
-[url]
-DELETE https://management.core.windows.net//services/hostedservices//certificates/
-
-[method]
-list_affinity_groups
-[return]
-AffinityGroups
-[url]
-GET https://management.core.windows.net//affinitygroups
-
-[method]
-create_affinity_group
-[url]
-POST https://management.core.windows.net//affinitygroups
-[requestbody]
-
-
- affinity-group-name
-
- affinity-group-description
- location
-
-
-[method]
-delete_affinity_group
-[return]
-AffinityGroups
-[url]
-DELETE https://management.core.windows.net//affinitygroups/
-
-[method]
-update_affinity_group
-[url]
-PUT https://management.core.windows.net//affinitygroups/
-[requestbody]
-
-
-
- affinity-group-description
-
-
-[method]
-get_affinity_group_properties
-[return]
-AffinityGroup
-[url]
-GET https://management.core.windows.net//affinitygroups/
-
-[method]
-list_locations
-[return]
-Locations
-[url]
-GET https://management.core.windows.net//locations
-
-[method]
-get_operation_status
-[return]
-OperationStatus
-[url]
-GET https://management.core.windows.net//operations/
-
-[method]
-list_operating_systems
-[return]
-OperatingSystems
-[url]
-GET https://management.core.windows.net//operatingsystems
-
-[method]
-list_operating_system_families
-[return]
-OperatingSystemFamilies
-[url]
-GET https://management.core.windows.net//operatingsystemfamilies
-
-[method]
-list_subscription_operations
-[return]
-SubscriptionOperationCollection
-[url]
-GET https://management.core.windows.net//operations
-[query]
-StartTime=;required
-EndTime=;required
-ObjectIdFilter=
-OperationResultFilter=
-ContinuationToken=
-
-[method]
-get_subscription
-[return]
-Subscription
-[url]
-GET https://management.core.windows.net/
-
-[method]
-create_profile
-[url]
-POST https://management.core.windows.net//services/WATM/profiles
-[requestbody]
-
- [domain-name-for-the-profile]
- [service-profile-name]
-
-
-[method]
-list_profiles
-[return]
-Profiles
-[url]
-GET https://management.core.windows.net//services/WATM/profiles
-
-[method]
-get_profile
-[return]
-Profile
-[url]
-GET https://management.core.windows.net//services/WATM/profiles/
-
-[method]
-delete_profile
-[return]
-Profile
-[url]
-DELETE https://management.core.windows.net//services/WATM/profiles/
-
-[method]
-list_definitions
-[return]
-Definitions
-[url]
-GET https://management.core.windows.net//services/WATM/profiles//definitions
-
-[method]
-get_definition
-[return]
-Definition
-[url]
-GET https://management.core.windows.net//services/WATM/profiles//definitions/
-[requestbody]
-binary:blob
-
-[method]
-update_profile
-[return]
-[url]
-PUT https://management.core.windows.net//services/WATM/profiles/
-[requestbody]
-class:profile
-
-[end]
-
-
diff --git a/src/codegenerator/queue_input.txt b/src/codegenerator/queue_input.txt
deleted file mode 100644
index c1afd5655a5b..000000000000
--- a/src/codegenerator/queue_input.txt
+++ /dev/null
@@ -1,238 +0,0 @@
-[class]
-QueueService
-[x-ms-version]
-2011-08-18
-[class-comment]
-This is the main class managing queue resources.
-account_name: your storage account name, required for all operations.
-account_key: your storage account key, required for all operations.
-[init]
-account_name
-account_key
-
-[method]
-get_queue_service_properties
-[comment]
-Gets the properties of a storage account's Queue Service, including Windows Azure
-Storage Analytics.
-
-timeout: Optional. The timeout parameter is expressed in seconds. For example, the
-following value sets a timeout of 30 seconds for the request: timeout=30
-[return]
-StorageServiceProperties
-[url]
-GET http://.queue.core.windows.net/?restype=service&comp=properties
-[query]
-timeout=
-
-[method]
-list_queues
-[comment]
-Lists all of the queues in a given storage account.
-
-[return]
-QueueEnumResults
-[url]
-GET http://.queue.core.windows.net/?comp=list
-[query]
-prefix=
-marker=
-maxresults=
-include=
-
-[method]
-create_queue
-[comment]
-Creates a queue under the given account.
-
-queue_name: name of the queue.
-x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
- with the queue as metadata.
-fail_on_exist: specify whether to throw an exception when the queue exists.
-[params]
-fail_on_exist=False
-[return]
-None
-[url]
-PUT http://.queue.core.windows.net/
-[requestheader]
-x-ms-meta-name-values=
-
-[method]
-delete_queue
-[comment]
-Permanently deletes the specified queue.
-
-queue_name: name of the queue.
-fail_not_exist: specify whether to throw an exception when the queue doesn't exist.
-[params]
-fail_not_exist=False
-[return]
-None
-[url]
-DELETE http://.queue.core.windows.net/
-
-[method]
-get_queue_metadata
-[comment]
-Retrieves user-defined metadata and queue properties on the specified queue.
-Metadata is associated with the queue as name-value pairs.
-
-queue_name: name of the queue.
-[return]
-dict
-prefix='x-ms-meta'
-[url]
-GET http://.queue.core.windows.net/?comp=metadata
-
-[method]
-set_queue_metadata
-[comment]
-Sets user-defined metadata on the specified queue. Metadata is associated
-with the queue as name-value pairs.
-
-queue_name: name of the queue.
-x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
- with the queue as metadata.
-[url]
-PUT http://.queue.core.windows.net/?comp=metadata
-[requestheader]
-x-ms-meta-name-values=
-
-[method]
-put_message
-[comment]
-Adds a new message to the back of the message queue. A visibility timeout can
-also be specified to make the message invisible until the visibility timeout
-expires. A message must be in a format that can be included in an XML request
-with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
-2011-08-18 and newer, or 8KB in size for previous versions.
-
-queue_name: name of the queue.
-visibilitytimeout: Optional. If specified, the request must be made using an
- x-ms-version of 2011-08-18 or newer.
-messagettl: Optional. Specifies the time-to-live interval for the message,
- in seconds. The maximum time-to-live allowed is 7 days. If this parameter
- is omitted, the default time-to-live is 7 days.
-[return]
-[url]
-POST http://.queue.core.windows.net//messages
-[query]
-visibilitytimeout=
-messagettl=
-[requestbody]
-
-
- required
-
-
-[method]
-get_messages
-[comment]
-Retrieves one or more messages from the front of the queue.
-
-queue_name: name of the queue.
-numofmessages: Optional. A nonzero integer value that specifies the number of
- messages to retrieve from the queue, up to a maximum of 32. If fewer are
- visible, the visible messages are returned. By default, a single message
- is retrieved from the queue with this operation.
-visibilitytimeout: Required. Specifies the new visibility timeout value, in
- seconds, relative to server time. The new value must be larger than or
- equal to 1 second, and cannot be larger than 7 days, or larger than 2
- hours on REST protocol versions prior to version 2011-08-18. The visibility
- timeout of a message can be set to a value later than the expiry time.
-[return]
-QueueMessagesList
-[url]
-GET http://.queue.core.windows.net//messages
-[query]
-numofmessages=
-visibilitytimeout=
-
-[method]
-peek_messages
-[comment]
-Retrieves one or more messages from the front of the queue, but does not alter
-the visibility of the message.
-
-queue_name: name of the queue.
-numofmessages: Optional. A nonzero integer value that specifies the number of
- messages to peek from the queue, up to a maximum of 32. By default,
- a single message is peeked from the queue with this operation.
-[return]
-QueueMessagesList
-[url]
-GET http://.queue.core.windows.net//messages?peekonly=true
-[query]
-numofmessages=
-
-[method]
-delete_message
-[comment]
-Deletes the specified message.
-
-queue_name: name of the queue.
-popreceipt: Required. A valid pop receipt value returned from an earlier call
- to the Get Messages or Update Message operation.
-[return]
-[url]
-DELETE http://.queue.core.windows.net//messages/
-[query]
-popreceipt=;required
-
-[method]
-clear_messages
-[comment]
-Deletes all messages from the specified queue.
-
-queue_name: name of the queue.
-[return]
-[url]
-DELETE http://.queue.core.windows.net//messages
-
-[method]
-update_message
-[comment]
-Updates the visibility timeout of a message. You can also use this
-operation to update the contents of a message.
-
-queue_name: name of the queue.
-popreceipt: Required. A valid pop receipt value returned from an earlier call
- to the Get Messages or Update Message operation.
-visibilitytimeout: Required. Specifies the new visibility timeout value, in
- seconds, relative to server time. The new value must be larger than or
- equal to 0, and cannot be larger than 7 days. The visibility timeout
- of a message cannot be set to a value later than the expiry time. A
- message can be updated until it has been deleted or has expired.
-[return]
-dict
-filter=['x-ms-popreceipt', 'x-ms-time-next-visible']
-[url]
-PUT http://.queue.core.windows.net//messages/
-[query]
-popreceipt=;required
-visibilitytimeout=;required
-[requestbody]
-
-
- required
-
-
-[method]
-set_queue_service_properties
-[comment]
-Sets the properties of a storage account's Queue service, including Windows Azure
-Storage Analytics.
-
-storage_service_properties: a StorageServiceProperties object.
-timeout: Optional. The timeout parameter is expressed in seconds.
-[return]
-[url]
-PUT http://.queue.core.windows.net/?restype=service&comp=properties
-[query]
-timeout=
-[requestbody]
-class:storage_service_properties;required
-
-[end]
-
diff --git a/src/codegenerator/servicebus_input.txt b/src/codegenerator/servicebus_input.txt
deleted file mode 100644
index 8aac7ac4edd5..000000000000
--- a/src/codegenerator/servicebus_input.txt
+++ /dev/null
@@ -1,478 +0,0 @@
-[class]
-ServiceBusService
-[x-ms-version]
-2011-06-01
-[init]
-service_namespace
-account_key
-issuer
-
-[method]
-create_queue
-[comment]
-Creates a new queue. Once created, this queue's resource manifest is immutable.
-
-queue: queue object to create.
-queue_name: the name of the queue.
-fail_on_exist: specify whether to throw an exception when the queue exists.
-[params]
-fail_on_exist=False
-[return]
-None
-[url]
-PUT https://.servicebus.windows.net/
-[requestbody]
-feed:queue
-
-[method]
-delete_queue
-[comment]
-Deletes an existing queue. This operation will also remove all associated state
-including messages in the queue.
-
-fail_not_exist: specify whether to throw an exception if the queue doesn't exist.
-[params]
-fail_not_exist=False
-[return]
-None
-[url]
-DELETE https://.servicebus.windows.net/
-
-[method]
-get_queue
-[comment]
-Retrieves an existing queue.
-
-queue_name: name of the queue.
-[return]
-Feed('queue')
-[url]
-GET https://.servicebus.windows.net/
-[requestheader]
-
-[method]
-list_queues
-[comment]
-Enumerates the queues in the service namespace.
-[return]
-(Feed('queue'),)
-[url]
-GET https://.servicebus.windows.net/$Resources/Queues
-[requestheader]
-
-[method]
-create_topic
-[comment]
-Creates a new topic. Once created, this topic's resource manifest is immutable.
-
-topic_name: name of the topic.
-topic: the Topic object to create.
-fail_on_exist: specify whether to throw an exception when the topic exists.
-[params]
-fail_on_exist=False
-[return]
-None
-[url]
-PUT https://.servicebus.windows.net/
-[requestbody]
-feed:topic
-
-[method]
-delete_topic
-[comment]
-Deletes an existing topic. This operation will also remove all associated state
-including associated subscriptions.
-
-topic_name: name of the topic.
-fail_not_exist: specify whether to throw an exception when the topic doesn't exist.
-[params]
-fail_not_exist=False
-[return]
-None
-[url]
-DELETE https://.servicebus.windows.net/
-
-[method]
-get_topic
-[comment]
-Retrieves the description for the specified topic.
-
-topic_name: name of the topic.
-[return]
-Feed('topic')
-[url]
-GET https://.servicebus.windows.net/
-[requestheader]
-
-[method]
-list_topics
-[comment]
-Retrieves the topics in the service namespace.
-[return]
-(Feed('topic'),)
-[url]
-GET https://.servicebus.windows.net/$Resources/Topics
-[requestheader]
-
-[method]
-create_rule
-[comment]
-Creates a new rule. Once created, this rule's resource manifest is immutable.
-
-topic_name: the name of the topic
-subscription_name: the name of the subscription
-rule_name: name of the rule.
-fail_on_exist: specify whether to throw an exception when the rule exists.
-[params]
-fail_on_exist=False
-[return]
-None
-[url]
-PUT https://.servicebus.windows.net//subscriptions//rules/
-[requestbody]
-feed:rule
-
-[method]
-delete_rule
-[comment]
-Deletes an existing rule.
-
-topic_name: the name of the topic
-subscription_name: the name of the subscription
-rule_name: the name of the rule. DEFAULT_RULE_NAME=$Default. Use DEFAULT_RULE_NAME
- to delete the default rule for the subscription.
-fail_not_exist: specify whether to throw an exception when the rule doesn't exist.
-[params]
-fail_not_exist=False
-[return]
-None
-[url]
-DELETE https://.servicebus.windows.net//subscriptions//rules/
-
-[method]
-get_rule
-[comment]
-Retrieves the description for the specified rule.
-
-topic_name: the name of the topic
-subscription_name: the name of the subscription
-rule_name: name of the rule
-[return]
-Feed('rule')
-[url]
-GET https://.servicebus.windows.net//subscriptions//rules/
-
-[method]
-list_rules
-[comment]
-Retrieves the rules that exist under the specified subscription.
-
-topic_name: the name of the topic
-subscription_name: the name of the subscription
-[return]
-(Feed('rule'),)
-[url]
-GET https://.servicebus.windows.net//subscriptions//rules/
-
-[method]
-create_subscription
-[comment]
-Creates a new subscription. Once created, this subscription's resource manifest is
-immutable.
-
-topic_name: the name of the topic
-subscription_name: the name of the subscription
-fail_on_exist: specify whether to throw an exception when the subscription exists.
-[params]
-fail_on_exist=False
-[return]
-None
-[url]
-PUT https://.servicebus.windows.net//subscriptions/