diff --git a/ChangeLog.txt b/ChangeLog.txt
index 986b301bebb4..38eac376ef8d 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,2 +1,6 @@
-2012-??-?? Version 0.9.0
- * Initial Release
\ No newline at end of file
+2012-10-16 Version 0.6.0
+ * Added service management API
+ * Added ability to specify custom hosts
+ * Added proxy server support (HTTP CONNECT tunneling)
+2012-06-06 Version 0.5.0
+ * Initial Release
diff --git a/README.md b/README.md
index 3bba4f23244a..02b8cbd0f6da 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,17 @@ Python Developer Center.
* Service Bus
* Queues: create, list and delete queues; create, list, and delete subscriptions; send, receive, unlock and delete messages
* Topics: create, list, and delete topics; create, list, and delete rules
+* Service Management
+ * storage accounts: create, update, delete, list, regenerate keys
+ * affinity groups: create, update, delete, list, get properties
+ * locations: list
+ * hosted services: create, update, delete, list, get properties
+ * deployment: create, get, delete, swap, change configuration, update status, upgrade, rollback
+ * role instance: reboot, reimage
+ * discover addresses and ports for the endpoints of other role instances in your service
+ * get configuration settings and access local resources
+ * get role instance information for current role and other role instances
+ * query and set the status of the current role
# Getting Started
## Download Source Code
@@ -193,6 +204,78 @@ sbs.send_topic_message('taskdiscussion', msg)
msg = sbs.receive_subscription_message('taskdiscussion', 'client1')
```
+
+## Service Management
+
+### Set-up certificates
+
+You need to create two certificates, one for the server (a .cer file) and one for the client (a .pem file). To create the .pem file using [OpenSSL](http://www.openssl.org), execute this:
+
+ openssl req -x509 -nodes -days 365 -newkey rsa:1024 -keyout mycert.pem -out mycert.pem
+
+To create the .cer certificate, execute this:
+
+ openssl x509 -inform pem -in mycert.pem -outform der -out mycert.cer
+
+### List Available Locations
+
+```Python
+locations = sms.list_locations()
+for location in locations:
+ print(location.name)
+```
+
+### Create a Storage Service
+
+To create a storage service, you need a name for the service (between 3 and 24 lowercase characters and unique within Windows Azure), a label (up to 100 characters, automatically encoded to base-64), and either a location or an affinity group.
+
+```Python
+name = "mystorageservice"
+desc = name
+label = name
+location = 'West US'
+
+result = sms.create_storage_account(name, desc, label, location=location)
+```
+
+
+### Create a Cloud Service
+
+A cloud service is also known as a hosted service (from earlier versions of Windows Azure). The **create_hosted_service** method allows you to create a new hosted service by providing a hosted service name (which must be unique in Windows Azure), a label (automatically encoded to base-64), and the location *or* the affinity group for your service.
+
+```Python
+name = "myhostedservice"
+desc = name
+label = name
+location = 'West US'
+
+result = sms.create_hosted_service(name, label, desc, location=location)
+```
+
+### Create a Deployment
+
+To make a new deployment to Azure you must store the package file in a Windows Azure Blob Storage account under the same subscription as the hosted service to which the package is being uploaded. You can create a deployment package with the [Windows Azure PowerShell cmdlets](https://www.windowsazure.com/en-us/develop/php/how-to-guides/powershell-cmdlets/), or with the [cspack commandline tool](http://msdn.microsoft.com/en-us/library/windowsazure/gg432988.aspx).
+
+```Python
+service_name = "myhostedservice"
+deployment_name = "v1"
+slot = 'Production'
+package_url = "URL_for_.cspkg_file"
+configuration = base64.b64encode(open('path_to_.cscfg_file', 'rb').read())
+label = service_name
+
+result = sms.create_deployment(service_name,
+ slot,
+ deployment_name,
+ package_url,
+ label,
+ configuration)
+
+operation = sms.get_operation_status(result.request_id)
+print('Operation status: ' + operation.status)
+```
+
+
** For more examples please see the [Windows Azure Python Developer Center](http://www.windowsazure.com/en-us/develop/python) **
# Need Help?
diff --git a/src/azure.pyproj b/src/azure.pyproj
index c5a84ad4f3e5..c76335724cb3 100644
--- a/src/azure.pyproj
+++ b/src/azure.pyproj
@@ -20,6 +20,10 @@
2af0f10d-7135-4994-9156-5d01c9c11b7e
2.7
+ SAK
+ SAK
+ SAK
+ SAK
true
@@ -34,6 +38,8 @@
+
+
@@ -44,10 +50,12 @@
+
+
-
+
diff --git a/src/azure/__init__.py b/src/azure/__init__.py
index e23bd9069fc3..7ede4ce9b1c3 100644
--- a/src/azure/__init__.py
+++ b/src/azure/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
+import sys
import types
from datetime import datetime
from xml.dom import minidom
@@ -28,6 +29,7 @@
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
+MANAGEMENT_HOST = 'management.core.windows.net'
#Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
@@ -58,6 +60,8 @@
_ERROR_VALUE_SHOULD_NOT_BE_NULL = '%s should not be None.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = 'Cannot serialize the specified value (%s) to an entity. Please use an EntityProperty (which can specify custom types), int, str, bool, or datetime'
+_USER_AGENT_STRING = 'pyazure'
+
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
@@ -84,6 +88,9 @@ def __init__(self, message):
class Feed:
pass
+class _Base64String(str):
+ pass
+
class HeaderDict(dict):
def __getitem__(self, index):
return super(HeaderDict, self).__getitem__(index.lower())
@@ -172,6 +179,16 @@ def _to_datetime(strtime):
'content_md5':'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
+ 'account_admin_live_email_id': 'AccountAdminLiveEmailId',
+ 'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
+ 'subscription_id': 'SubscriptionID',
+ 'fqdn': 'FQDN',
+ 'private_id': 'PrivateID',
+ 'os_virtual_hard_disk': 'OSVirtualHardDisk',
+ 'logical_disk_size_in_gb':'LogicalDiskSizeInGB',
+ 'logical_size_in_gb':'LogicalSizeInGB',
+ 'os':'OS',
+ 'persistent_vm_downtime_info':'PersistentVMDowntimeInfo',
}
def _get_serialization_name(element_name):
@@ -202,6 +219,18 @@ def _int_or_none(value):
return str(int(value))
+def _bool_or_none(value):
+ if value is None:
+ return None
+
+ if isinstance(value, bool):
+ if value:
+ return 'true'
+ else:
+ return 'false'
+
+ return str(value)
+
def _convert_class_to_xml(source, xml_prefix = True):
if source is None:
return ''
@@ -284,9 +313,28 @@ def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_SHOULD_NOT_BE_NULL % (param_name))
-def _fill_list_of(xmldoc, element_type):
- xmlelements = _get_child_nodes(xmldoc, element_type.__name__)
- return [_parse_response_body(xmlelement.toxml('utf-8'), element_type) for xmlelement in xmlelements]
+def _fill_list_of(xmldoc, element_type, xml_element_name):
+ xmlelements = _get_child_nodes(xmldoc, xml_element_name)
+ return [_parse_response_body_from_xml_node(xmlelement, element_type) for xmlelement in xmlelements]
+
+def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name, xml_element_name):
+ '''Converts an xml fragment into a list of scalar types. The parent xml element contains a
+ flat list of xml elements which are converted into the specified scalar type and added to the list.
+ Example:
+ xmldoc=
+        <Endpoints>
+            <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
+            <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
+            <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
+        </Endpoints>
+ element_type=str
+ parent_xml_element_name='Endpoints'
+ xml_element_name='Endpoint'
+ '''
+ xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
+ if xmlelements:
+ xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)
+ return [_get_node_value(xmlelement, element_type) for xmlelement in xmlelements]
def _fill_dict(xmldoc, element_name):
xmlelements = _get_child_nodes(xmldoc, element_name)
@@ -297,6 +345,43 @@ def _fill_dict(xmldoc, element_name):
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
+def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name, key_xml_element_name, value_xml_element_name):
+ '''Converts an xml fragment into a dictionary. The parent xml element contains a
+ list of xml elements where each element has a child element for the key, and another for the value.
+ Example:
+ xmldoc=
+        <ExtendedProperties>
+            <ExtendedProperty>
+                <Name>Ext1</Name>
+                <Value>Val1</Value>
+            </ExtendedProperty>
+            <ExtendedProperty>
+                <Name>Ext2</Name>
+                <Value>Val2</Value>
+            </ExtendedProperty>
+        </ExtendedProperties>
+ element_type=str
+ parent_xml_element_name='ExtendedProperties'
+ pair_xml_element_name='ExtendedProperty'
+ key_xml_element_name='Name'
+ value_xml_element_name='Value'
+ '''
+ return_obj = { }
+
+ xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
+ if xmlelements:
+ xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)
+ for pair in xmlelements:
+ keys = _get_child_nodes(pair, key_xml_element_name)
+ values = _get_child_nodes(pair, value_xml_element_name)
+ if keys and values:
+ key = str(keys[0].firstChild.nodeValue)
+ value = str(values[0].firstChild.nodeValue)
+
+ return_obj[key] = value
+
+ return return_obj
+
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type. The child name
'''
@@ -312,7 +397,7 @@ def _fill_instance_child(xmldoc, element_name, return_type):
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
- return _parse_response_body(element.toxml('utf-8'), return_type)
+ return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
@@ -332,6 +417,15 @@ def _fill_data_minidom(xmldoc, element_name, data_member):
else:
return type(data_member)(value)
+def _get_node_value(xmlelement, data_type):
+ value = xmlelement.firstChild.nodeValue
+ if data_type is datetime:
+ return _to_datetime(value)
+ elif data_type is types.BooleanType:
+ return value.lower() != 'false'
+ else:
+ return data_type(value)
+
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
@@ -401,22 +495,42 @@ def _parse_response(response, return_type):
'''
parse the HTTPResponse's body and fill all the data into a class of return_type
'''
- return _parse_response_body(response.body, return_type)
+ return _parse_response_body_from_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
- for name, value in vars(return_obj).iteritems():
+ members = dict(vars(return_obj))
+ for name, value in members.iteritems():
if isinstance(value, _list_of):
- setattr(return_obj, name, _fill_list_of(node, value.list_type))
+ setattr(return_obj, name, _fill_list_of(node, value.list_type, value.xml_element_name))
+ elif isinstance(value, _scalar_list_of):
+ setattr(return_obj, name, _fill_scalar_list_of(node, value.list_type, _get_serialization_name(name), value.xml_element_name))
+ elif isinstance(value, _dict_of):
+ setattr(return_obj, name, _fill_dict_of(node, _get_serialization_name(name), value.pair_xml_element_name, value.key_xml_element_name, value.value_xml_element_name))
elif isinstance(value, WindowsAzureData):
setattr(return_obj, name, _fill_instance_child(node, name, value.__class__))
elif isinstance(value, dict):
setattr(return_obj, name, _fill_dict(node, _get_serialization_name(name)))
+ elif isinstance(value, _Base64String):
+ value = _fill_data_minidom(node, name, '')
+ if value is not None:
+ value = base64.b64decode(value)
+ #always set the attribute, so we don't end up returning an object with type _Base64String
+ setattr(return_obj, name, value)
else:
value = _fill_data_minidom(node, name, value)
if value is not None:
setattr(return_obj, name, value)
-def _parse_response_body(respbody, return_type):
+def _parse_response_body_from_xml_node(node, return_type):
+ '''
+ parse the xml and fill all the data into a class of return_type
+ '''
+ return_obj = return_type()
+ _fill_data_to_return_object(node, return_obj)
+
+ return return_obj
+
+def _parse_response_body_from_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
@@ -427,11 +541,31 @@ def _parse_response_body(respbody, return_type):
return return_obj
+class _dict_of(dict):
+ """a dict which carries with it the xml element names for key,val.
+    Used for deserialization and construction of the lists"""
+ def __init__(self, pair_xml_element_name, key_xml_element_name, value_xml_element_name):
+ self.pair_xml_element_name = pair_xml_element_name
+ self.key_xml_element_name = key_xml_element_name
+ self.value_xml_element_name = value_xml_element_name
+
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists"""
- def __init__(self, list_type):
+ def __init__(self, list_type, xml_element_name=None):
+ self.list_type = list_type
+ if xml_element_name is None:
+ self.xml_element_name = list_type.__name__
+ else:
+ self.xml_element_name = xml_element_name
+
+class _scalar_list_of(list):
+ """a list of scalar types which carries with it the type that's
+ expected to go in it along with its xml element name.
+    Used for deserialization and construction of the lists"""
+ def __init__(self, list_type, xml_element_name):
self.list_type = list_type
+ self.xml_element_name = xml_element_name
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
@@ -447,16 +581,12 @@ def _update_request_uri_query(request):
appear after the existing parameters'''
if '?' in request.path:
- pos = request.path.find('?')
- query_string = request.path[pos+1:]
- request.path = request.path[:pos]
+ request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
- pos = query.find('=')
- name = query[:pos]
- value = query[pos+1:]
+ name, _, value = query.partition('=')
request.query.append((name, value))
request.path = urllib2.quote(request.path, '/()$=\',')
@@ -484,6 +614,18 @@ def _dont_fail_not_exist(error):
return False
else:
raise error
+
+def _general_error_handler(http_error):
+ ''' Simple error handler for azure.'''
+ if http_error.status == 409:
+ raise WindowsAzureConflictError(_ERROR_CONFLICT)
+ elif http_error.status == 404:
+ raise WindowsAzureMissingResourceError(_ERROR_NOT_FOUND)
+ else:
+ if http_error.respbody is not None:
+ raise WindowsAzureError(_ERROR_UNKNOWN % http_error.message + '\n' + http_error.respbody)
+ else:
+ raise WindowsAzureError(_ERROR_UNKNOWN % http_error.message)
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard http headers.'''
@@ -530,23 +672,3 @@ def _parse_response_for_dict_filter(response, filter):
return return_dict
else:
return None
-
-def _get_table_host(account_name, use_local_storage=False):
- ''' Gets service host base on the service type and whether it is using local storage. '''
-
- if use_local_storage:
- return DEV_TABLE_HOST
- else:
- return account_name + TABLE_SERVICE_HOST_BASE
-
-def _get_queue_host(account_name, use_local_storage=False):
- if use_local_storage:
- return DEV_QUEUE_HOST
- else:
- return account_name + QUEUE_SERVICE_HOST_BASE
-
-def _get_blob_host(account_name, use_local_storage=False):
- if use_local_storage:
- return DEV_BLOB_HOST
- else:
- return account_name + BLOB_SERVICE_HOST_BASE
diff --git a/src/azure/http/__init__.py b/src/azure/http/__init__.py
index 3aeb36ebe1f2..92ccaeb87fbb 100644
--- a/src/azure/http/__init__.py
+++ b/src/azure/http/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/azure/http/batchclient.py b/src/azure/http/batchclient.py
index f0eca01564d0..66f3085208fe 100644
--- a/src/azure/http/batchclient.py
+++ b/src/azure/http/batchclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,8 +28,8 @@ class _BatchClient(_HTTPClient):
It only supports one changeset.
'''
- def __init__(self, service_instance, account_key, account_name, x_ms_version=None, protocol='http'):
- _HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, x_ms_version=x_ms_version, protocol=protocol)
+ def __init__(self, service_instance, account_key, account_name, protocol='http'):
+ _HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, protocol=protocol)
self.is_batch = False
self.batch_requests = []
self.batch_table = ''
diff --git a/src/azure/http/httpclient.py b/src/azure/http/httpclient.py
index fb572592cb72..3cc85c7e693e 100644
--- a/src/azure/http/httpclient.py
+++ b/src/azure/http/httpclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -26,13 +26,14 @@
from xml.dom import minidom
from azure.http import HTTPError, HTTPResponse
+from azure import _USER_AGENT_STRING
class _HTTPClient:
'''
Takes the request and sends it to cloud service and returns the response.
'''
- def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, x_ms_version=None, protocol='https'):
+ def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, protocol='https'):
'''
service_instance: service client instance.
cert_file: certificate file name/location. This is only used in hosted service management.
@@ -40,7 +41,6 @@ def __init__(self, service_instance, cert_file=None, account_name=None, account_
account_key: the storage account access key for storage services or servicebus access key for service bus service.
service_namespace: the service namespace for service bus.
issuer: the issuer for service bus service.
- x_ms_version: the x_ms_version for the service.
'''
self.service_instance = service_instance
self.status = None
@@ -51,8 +51,14 @@ def __init__(self, service_instance, cert_file=None, account_name=None, account_
self.account_key = account_key
self.service_namespace = service_namespace
self.issuer = issuer
- self.x_ms_version = x_ms_version
self.protocol = protocol
+ self.proxy_host = None
+ self.proxy_port = None
+
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self.proxy_host = host
+ self.proxy_port = port
def get_connection(self, request):
''' Create connection for the request. '''
@@ -67,12 +73,17 @@ def get_connection(self, request):
_connection = httplib.HTTPConnection(request.host)
else:
_connection = httplib.HTTPSConnection(request.host, cert_file=self.cert_file)
+
+ if self.proxy_host:
+ _connection.set_tunnel(self.proxy_host, self.proxy_port)
+
return _connection
def send_request_headers(self, connection, request_headers):
for name, value in request_headers:
if value:
connection.putheader(name, value)
+ connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
def send_request_body(self, connection, request_body):
diff --git a/src/azure/http/winhttp.py b/src/azure/http/winhttp.py
index 4340389214aa..139654f9cf53 100644
--- a/src/azure/http/winhttp.py
+++ b/src/azure/http/winhttp.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,6 +33,8 @@
VT_UI8 = 21
VT_ARRAY = 8192
+HTTPREQUEST_PROXYSETTING_PROXY = 2
+
HTTPREQUEST_PROXY_SETTING = c_long
HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
#------------------------------------------------------------------------------
@@ -52,12 +54,13 @@
_SysFreeString = _oleaut32.SysFreeString
_SysFreeString.argtypes = [c_void_p]
-_SafeArrayDestroy = _oleaut32.SafeArrayDestroy
-_SafeArrayDestroy.argtypes = [c_void_p]
-
_CoTaskMemAlloc = _ole32.CoTaskMemAlloc
_CoTaskMemAlloc.restype = c_void_p
_CoTaskMemAlloc.argtypes = [c_size_t]
+
+_CoTaskMemFree = _ole32.CoTaskMemFree
+_CoTaskMemFree.argtypes = [c_void_p]
+
#------------------------------------------------------------------------------
class BSTR(c_wchar_p):
@@ -87,8 +90,7 @@ class _tagSAFEARRAYBOUND(Structure):
('rgsabound', _tagSAFEARRAYBOUND*1)]
def __del__(self):
- _SafeArrayDestroy(self.pvdata)
- pass
+ _CoTaskMemFree(self.pvdata)
class VARIANT(Structure):
'''
@@ -250,6 +252,24 @@ def set_client_certificate(self, certificate):
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate)
+ def set_tunnel(self, host, port):
+ ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''
+ url = host
+ if port:
+ url = url + u':' + port
+
+ var_host = VARIANT()
+ var_host.vt = VT_BSTR
+ var_host.vdata.bstrval = BSTR(url)
+
+ var_empty = VARIANT()
+ var_empty.vt = VT_EMPTY
+ var_empty.vdata.llval = 0
+
+ _WinHttpRequest._SetProxy(self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
+
+ _SysFreeString(var_host.vdata.bstrval)
+
def __del__(self):
if self.value is not None:
_WinHttpRequest._Release(self)
@@ -288,6 +308,10 @@ def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
_CoInitialize(None)
_CoCreateInstance(byref(clsid), 0, 1, byref(iid), byref(self._httprequest))
+ def set_tunnel(self, host, port=None):
+ ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. '''
+ self._httprequest.set_tunnel(unicode(host), unicode(str(port)))
+
def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
@@ -338,7 +362,3 @@ def getresponse(self):
length = len(body)
return _Response(status, status_text, length, headers, body)
-
-
-
-
diff --git a/src/azure/servicebus/__init__.py b/src/azure/servicebus/__init__.py
index 3cca653ab3d5..a42a19a99c5d 100644
--- a/src/azure/servicebus/__init__.py
+++ b/src/azure/servicebus/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,11 +22,12 @@
from azure.http import HTTPError
-from azure import (WindowsAzureError, WindowsAzureData,
+from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
_create_entry, _get_entry_properties, xml_escape,
_get_child_nodes, WindowsAzureMissingResourceError,
WindowsAzureConflictError, _get_serialization_name,
- _get_children_from_path, _get_first_child_node_value)
+ _get_children_from_path, _get_first_child_node_value,
+ _USER_AGENT_STRING)
import azure
#default rule name for subscription
@@ -267,6 +268,7 @@ def _get_token(request, account_key, issuer):
connection = httplib.HTTPSConnection(host)
connection.putrequest('POST', '/WRAPv0.9')
connection.putheader('Content-Length', len(request_body))
+ connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
connection.send(request_body)
resp = connection.getresponse()
@@ -688,12 +690,6 @@ def convert_queue_to_xml(queue):
def _service_bus_error_handler(http_error):
''' Simple error handler for service bus service. Will add more specific cases '''
-
- if http_error.status == 409:
- raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
- elif http_error.status == 404:
- raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
- else:
- raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
+ return _general_error_handler(http_error)
from azure.servicebus.servicebusservice import ServiceBusService
diff --git a/src/azure/servicebus/servicebusservice.py b/src/azure/servicebus/servicebusservice.py
index ccd4d2f9d13c..81f6ed1bba8b 100644
--- a/src/azure/servicebus/servicebusservice.py
+++ b/src/azure/servicebus/servicebusservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,7 +36,6 @@
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class ServiceBusService:
@@ -52,7 +51,7 @@ def create_queue(self, queue_name, queue=None, fail_on_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.body = _get_request_body(convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
@@ -78,7 +77,7 @@ def delete_queue(self, queue_name, fail_not_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -102,7 +101,7 @@ def get_queue(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -116,7 +115,7 @@ def list_queues(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -135,7 +134,7 @@ def create_topic(self, topic_name, topic=None, fail_on_exist=False):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.body = _get_request_body(convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
@@ -162,7 +161,7 @@ def delete_topic(self, topic_name, fail_not_exist=False):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -186,7 +185,7 @@ def get_topic(self, topic_name):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -200,7 +199,7 @@ def list_topics(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -222,7 +221,7 @@ def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.body = _get_request_body(convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
@@ -253,7 +252,7 @@ def delete_rule(self, topic_name, subscription_name, rule_name, fail_not_exist=F
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -281,7 +280,7 @@ def get_rule(self, topic_name, subscription_name, rule_name):
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -300,7 +299,7 @@ def list_rules(self, topic_name, subscription_name):
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -321,7 +320,7 @@ def create_subscription(self, topic_name, subscription_name, subscription=None,
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.body = _get_request_body(convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
@@ -349,7 +348,7 @@ def delete_subscription(self, topic_name, subscription_name, fail_not_exist=Fals
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -375,7 +374,7 @@ def get_subscription(self, topic_name, subscription_name):
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -392,7 +391,7 @@ def list_subscriptions(self, topic_name):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -413,7 +412,7 @@ def send_topic_message(self, topic_name, message=None):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
@@ -441,7 +440,7 @@ def peek_lock_subscription_message(self, topic_name, subscription_name, timeout=
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -470,7 +469,7 @@ def unlock_subscription_message(self, topic_name, subscription_name, sequence_nu
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -490,7 +489,7 @@ def read_delete_subscription_message(self, topic_name, subscription_name, timeou
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -518,7 +517,7 @@ def delete_subscription_message(self, topic_name, subscription_name, sequence_nu
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -537,7 +536,7 @@ def send_queue_message(self, queue_name, message=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
@@ -562,7 +561,7 @@ def peek_lock_queue_message(self, queue_name, timeout='60'):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -589,7 +588,7 @@ def unlock_queue_message(self, queue_name, sequence_number, lock_token):
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -607,7 +606,7 @@ def read_delete_queue_message(self, queue_name, timeout='60'):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -633,7 +632,7 @@ def delete_queue_message(self, queue_name, sequence_number, lock_token):
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -652,11 +651,13 @@ def receive_subscription_message(self, topic_name, subscription_name, peek_lock=
else:
return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
- def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01'):
+ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE):
+ #x_ms_version is not used, but the parameter is kept for backwards compatibility
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
- self.issuer = issuer
+ self.issuer = issuer
+ self.host_base = host_base
#get service namespace, account key and issuer. If they are set when constructing, then use them.
#else find them from environment variables.
@@ -673,8 +674,7 @@ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_v
if not self.service_namespace or not self.account_key or not self.issuer:
raise WindowsAzureError('You need to provide servicebus namespace, access key and Issuer')
- self.x_ms_version = x_ms_version
- self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer, x_ms_version=self.x_ms_version)
+ self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
@@ -685,7 +685,7 @@ def with_filter(self, filter):
request, pass it off to the next lambda, and then perform any post-processing
on the response.'''
res = ServiceBusService(self.service_namespace, self.account_key,
- self.issuer, self.x_ms_version)
+                                self.issuer, host_base=self.host_base)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
@@ -693,6 +693,13 @@ def new_filter(request):
res._filter = new_filter
return res
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self._httpclient.set_proxy(host, port)
+
+ def _get_host(self):
+ return self.service_namespace + self.host_base
+
def _perform_request(self, request):
try:
resp = self._filter(request)
diff --git a/src/azure/servicemanagement/__init__.py b/src/azure/servicemanagement/__init__.py
new file mode 100644
index 000000000000..5fe33fb49858
--- /dev/null
+++ b/src/azure/servicemanagement/__init__.py
@@ -0,0 +1,1067 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+import base64
+from azure.http import HTTPError
+from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
+ _create_entry, _get_entry_properties, xml_escape,
+ _get_child_nodes, WindowsAzureMissingResourceError,
+ WindowsAzureConflictError, _get_serialization_name,
+ _list_of, _scalar_list_of, _dict_of, _Base64String,
+ _get_children_from_path, _get_first_child_node_value)
+import azure
+
+#-----------------------------------------------------------------------------
+# Constants for Azure app environment settings.
+AZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'
+AZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'
+
+#x-ms-version for service management.
+X_MS_VERSION = '2012-03-01'
+
+#-----------------------------------------------------------------------------
+# Data classes
+
+class StorageServices(WindowsAzureData):
+ def __init__(self):
+ self.storage_services = _list_of(StorageService)
+
+ def __iter__(self):
+ return iter(self.storage_services)
+
+ def __len__(self):
+ return len(self.storage_services)
+
+ def __getitem__(self, index):
+ return self.storage_services[index]
+
+class StorageService(WindowsAzureData):
+ def __init__(self):
+ self.url = ''
+ self.service_name = ''
+ self.storage_service_properties = StorageAccountProperties()
+ self.storage_service_keys = StorageServiceKeys()
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+ self.capabilities = _scalar_list_of(str, 'Capability')
+
+class StorageAccountProperties(WindowsAzureData):
+ def __init__(self):
+ self.description = ''
+ self.affinity_group = ''
+ self.location = ''
+ self.label = _Base64String()
+ self.status = ''
+ self.endpoints = _scalar_list_of(str, 'Endpoint')
+ self.geo_replication_enabled = False
+ self.geo_primary_region = ''
+ self.status_of_primary = ''
+ self.geo_secondary_region = ''
+ self.status_of_secondary = ''
+ self.last_geo_failover_time = ''
+
+class StorageServiceKeys(WindowsAzureData):
+ def __init__(self):
+ self.primary = ''
+ self.secondary = ''
+
+class Locations(WindowsAzureData):
+ def __init__(self):
+ self.locations = _list_of(Location)
+
+ def __iter__(self):
+ return iter(self.locations)
+
+ def __len__(self):
+ return len(self.locations)
+
+ def __getitem__(self, index):
+ return self.locations[index]
+
+class Location(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.display_name = ''
+ self.available_services = _scalar_list_of(str, 'AvailableService')
+
+class AffinityGroup(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.label = _Base64String()
+ self.description = ''
+ self.location = ''
+ self.hosted_services = HostedServices()
+ self.storage_services = StorageServices()
+ self.capabilities = _scalar_list_of(str, 'Capability')
+
+class AffinityGroups(WindowsAzureData):
+ def __init__(self):
+ self.affinity_groups = _list_of(AffinityGroup)
+
+ def __iter__(self):
+ return iter(self.affinity_groups)
+
+ def __len__(self):
+ return len(self.affinity_groups)
+
+ def __getitem__(self, index):
+ return self.affinity_groups[index]
+
+class HostedServices(WindowsAzureData):
+ def __init__(self):
+ self.hosted_services = _list_of(HostedService)
+
+ def __iter__(self):
+ return iter(self.hosted_services)
+
+ def __len__(self):
+ return len(self.hosted_services)
+
+ def __getitem__(self, index):
+ return self.hosted_services[index]
+
+class HostedService(WindowsAzureData):
+ def __init__(self):
+ self.url = ''
+ self.service_name = ''
+ self.hosted_service_properties = HostedServiceProperties()
+ self.deployments = Deployments()
+
+class HostedServiceProperties(WindowsAzureData):
+ def __init__(self):
+ self.description = ''
+ self.location = ''
+ self.affinity_group = ''
+ self.label = _Base64String()
+ self.status = ''
+ self.date_created = ''
+ self.date_last_modified = ''
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+
+class Deployments(WindowsAzureData):
+ def __init__(self):
+ self.deployments = _list_of(Deployment)
+
+ def __iter__(self):
+ return iter(self.deployments)
+
+ def __len__(self):
+ return len(self.deployments)
+
+ def __getitem__(self, index):
+ return self.deployments[index]
+
+class Deployment(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.deployment_slot = ''
+ self.private_id = ''
+ self.status = ''
+ self.label = _Base64String()
+ self.url = ''
+ self.configuration = _Base64String()
+ self.role_instance_list = RoleInstanceList()
+ self.upgrade_status = UpgradeStatus()
+ self.upgrade_domain_count = ''
+ self.role_list = RoleList()
+ self.sdk_version = ''
+ self.input_endpoint_list = InputEndpoints()
+ self.locked = False
+ self.rollback_allowed = False
+ self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
+ self.created_time = ''
+ self.last_modified_time = ''
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+
+class RoleInstanceList(WindowsAzureData):
+ def __init__(self):
+ self.role_instances = _list_of(RoleInstance)
+
+ def __iter__(self):
+ return iter(self.role_instances)
+
+ def __len__(self):
+ return len(self.role_instances)
+
+ def __getitem__(self, index):
+ return self.role_instances[index]
+
+class RoleInstance(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.instance_name = ''
+ self.instance_status = ''
+ self.instance_upgrade_domain = 0
+ self.instance_fault_domain = 0
+ self.instance_size = ''
+ self.instance_state_details = ''
+ self.instance_error_code = ''
+ self.ip_address = ''
+ self.power_state = ''
+ self.fqdn = ''
+
+class UpgradeStatus(WindowsAzureData):
+ def __init__(self):
+ self.upgrade_type = ''
+ self.current_upgrade_domain_state = ''
+ self.current_upgrade_domain = ''
+
+class InputEndpoints(WindowsAzureData):
+ def __init__(self):
+ self.input_endpoints = _list_of(InputEndpoint)
+
+ def __iter__(self):
+ return iter(self.input_endpoints)
+
+ def __len__(self):
+ return len(self.input_endpoints)
+
+ def __getitem__(self, index):
+ return self.input_endpoints[index]
+
+class InputEndpoint(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.vip = ''
+ self.port = ''
+
+class RoleList(WindowsAzureData):
+ def __init__(self):
+ self.roles = _list_of(Role)
+
+ def __iter__(self):
+ return iter(self.roles)
+
+ def __len__(self):
+ return len(self.roles)
+
+ def __getitem__(self, index):
+ return self.roles[index]
+
+class Role(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.os_version = ''
+
+class PersistentVMDowntimeInfo(WindowsAzureData):
+ def __init__(self):
+ self.start_time = ''
+ self.end_time = ''
+ self.status = ''
+
+class Certificates(WindowsAzureData):
+ def __init__(self):
+ self.certificates = _list_of(Certificate)
+
+ def __iter__(self):
+ return iter(self.certificates)
+
+ def __len__(self):
+ return len(self.certificates)
+
+ def __getitem__(self, index):
+ return self.certificates[index]
+
+class Certificate(WindowsAzureData):
+ def __init__(self):
+ self.certificate_url = ''
+ self.thumbprint = ''
+ self.thumbprint_algorithm = ''
+ self.data = ''
+
+class OperationError(WindowsAzureData):
+ def __init__(self):
+ self.code = ''
+ self.message = ''
+
+class Operation(WindowsAzureData):
+ def __init__(self):
+ self.id = ''
+ self.status = ''
+ self.http_status_code = ''
+ self.error = OperationError()
+
+class OperatingSystem(WindowsAzureData):
+ def __init__(self):
+ self.version = ''
+ self.label = _Base64String()
+ self.is_default = True
+ self.is_active = True
+ self.family = 0
+ self.family_label = _Base64String()
+
+class OperatingSystems(WindowsAzureData):
+ def __init__(self):
+ self.operating_systems = _list_of(OperatingSystem)
+
+ def __iter__(self):
+ return iter(self.operating_systems)
+
+ def __len__(self):
+ return len(self.operating_systems)
+
+ def __getitem__(self, index):
+ return self.operating_systems[index]
+
+class OperatingSystemFamily(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.label = _Base64String()
+ self.operating_systems = OperatingSystems()
+
+class OperatingSystemFamilies(WindowsAzureData):
+ def __init__(self):
+ self.operating_system_families = _list_of(OperatingSystemFamily)
+
+ def __iter__(self):
+ return iter(self.operating_system_families)
+
+ def __len__(self):
+ return len(self.operating_system_families)
+
+ def __getitem__(self, index):
+ return self.operating_system_families[index]
+
+class Subscription(WindowsAzureData):
+ def __init__(self):
+ self.subscription_id = ''
+ self.subscription_name = ''
+ self.subscription_status = ''
+ self.account_admin_live_email_id = ''
+ self.service_admin_live_email_id = ''
+ self.max_core_count = 0
+ self.max_storage_accounts = 0
+ self.max_hosted_services = 0
+ self.current_core_count = 0
+ self.current_hosted_services = 0
+ self.current_storage_accounts = 0
+ self.max_virtual_network_sites = 0
+ self.max_local_network_sites = 0
+ self.max_dns_servers = 0
+
+class AvailabilityResponse(WindowsAzureData):
+ def __init__(self):
+ self.result = False
+
+class SubscriptionCertificates(WindowsAzureData):
+ def __init__(self):
+ self.subscription_certificates = _list_of(SubscriptionCertificate)
+
+ def __iter__(self):
+ return iter(self.subscription_certificates)
+
+ def __len__(self):
+ return len(self.subscription_certificates)
+
+ def __getitem__(self, index):
+ return self.subscription_certificates[index]
+
+class SubscriptionCertificate(WindowsAzureData):
+ def __init__(self):
+ self.subscription_certificate_public_key = ''
+ self.subscription_certificate_thumbprint = ''
+ self.subscription_certificate_data = ''
+ self.created = ''
+
+class Images(WindowsAzureData):
+ def __init__(self):
+ self.images = _list_of(OSImage)
+
+ def __iter__(self):
+ return iter(self.images)
+
+ def __len__(self):
+ return len(self.images)
+
+ def __getitem__(self, index):
+ return self.images[index]
+
+class OSImage(WindowsAzureData):
+ def __init__(self):
+ self.affinity_group = ''
+ self.category = ''
+ self.location = ''
+ self.logical_size_in_gb = 0
+ self.label = ''
+ self.media_link = ''
+ self.name = ''
+ self.os = ''
+ self.eula = ''
+ self.description = ''
+
+class Disks(WindowsAzureData):
+ def __init__(self):
+ self.disks = _list_of(Disk)
+
+ def __iter__(self):
+ return iter(self.disks)
+
+ def __len__(self):
+ return len(self.disks)
+
+ def __getitem__(self, index):
+ return self.disks[index]
+
+class Disk(WindowsAzureData):
+ def __init__(self):
+ self.affinity_group = ''
+ self.attached_to = AttachedTo()
+ self.has_operating_system = ''
+ self.is_corrupted = ''
+ self.location = ''
+ self.logical_disk_size_in_gb = 0
+ self.label = ''
+        self.media_link = ''
+ self.name = ''
+ self.os = ''
+ self.source_image_name = ''
+
+class AttachedTo(WindowsAzureData):
+ def __init__(self):
+ self.hosted_service_name = ''
+ self.deployment_name = ''
+ self.role_name = ''
+
+class PersistentVMRole(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+        self.role_type = ''
+ self.os_version = '' # undocumented
+ self.configuration_sets = ConfigurationSets()
+ self.availability_set_name = ''
+ self.data_virtual_hard_disks = DataVirtualHardDisks()
+ self.os_virtual_hard_disk = OSVirtualHardDisk()
+ self.role_size = ''
+
+class ConfigurationSets(WindowsAzureData):
+ def __init__(self):
+ self.configuration_sets = _list_of(ConfigurationSet)
+
+ def __iter__(self):
+ return iter(self.configuration_sets)
+
+ def __len__(self):
+ return len(self.configuration_sets)
+
+ def __getitem__(self, index):
+ return self.configuration_sets[index]
+
+class ConfigurationSet(WindowsAzureData):
+ def __init__(self):
+ self.configuration_set_type = ''
+        self.role_type = ''
+ self.input_endpoints = ConfigurationSetInputEndpoints()
+ self.subnet_names = _scalar_list_of(str, 'SubnetName')
+
+class ConfigurationSetInputEndpoints(WindowsAzureData):
+ def __init__(self):
+ self.input_endpoints = _list_of(ConfigurationSetInputEndpoint, 'InputEndpoint')
+
+ def __iter__(self):
+ return iter(self.input_endpoints)
+
+ def __len__(self):
+ return len(self.input_endpoints)
+
+ def __getitem__(self, index):
+ return self.input_endpoints[index]
+
+class ConfigurationSetInputEndpoint(WindowsAzureData):
+ '''
+ Initializes a network configuration input endpoint.
+
+ name: Specifies the name for the external endpoint.
+ protocol: Specifies the protocol to use to inspect the virtual machine availability status. Possible values are: HTTP, TCP.
+ port: Specifies the external port to use for the endpoint.
+ local_port: Specifies the internal port on which the virtual machine is listening to serve the endpoint.
+ load_balanced_endpoint_set_name: Specifies a name for a set of load-balanced endpoints. Specifying this element for a given endpoint adds it to the set. If you are setting an endpoint to use to connect to the virtual machine via the Remote Desktop, do not set this property.
+ enable_direct_server_return: Specifies whether direct server return load balancing is enabled.
+ '''
+ def __init__(self, name='', protocol='', port='', local_port='', load_balanced_endpoint_set_name='', enable_direct_server_return=False):
+ self.enable_direct_server_return = enable_direct_server_return
+ self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
+ self.local_port = local_port
+ self.name = name
+ self.port = port
+ self.load_balancer_probe = LoadBalancerProbe()
+ self.protocol = protocol
+
+class WindowsConfigurationSet(WindowsAzureData):
+ def __init__(self, computer_name=None, admin_password=None, reset_password_on_first_logon=None, enable_automatic_updates=None, time_zone=None):
+ self.configuration_set_type = 'WindowsProvisioningConfiguration'
+ self.computer_name = computer_name
+ self.admin_password = admin_password
+ self.reset_password_on_first_logon = reset_password_on_first_logon
+ self.enable_automatic_updates = enable_automatic_updates
+ self.time_zone = time_zone
+ self.domain_join = DomainJoin()
+ self.stored_certificate_settings = StoredCertificateSettings()
+
+class DomainJoin(WindowsAzureData):
+ def __init__(self):
+ self.credentials = Credentials()
+ self.join_domain = ''
+ self.machine_object_ou = ''
+
+class Credentials(WindowsAzureData):
+    def __init__(self):
+ self.domain = ''
+ self.username = ''
+ self.password = ''
+
+class StoredCertificateSettings(WindowsAzureData):
+ def __init__(self):
+ self.stored_certificate_settings = _list_of(CertificateSetting)
+
+ def __iter__(self):
+ return iter(self.stored_certificate_settings)
+
+ def __len__(self):
+ return len(self.stored_certificate_settings)
+
+ def __getitem__(self, index):
+ return self.stored_certificate_settings[index]
+
+class CertificateSetting(WindowsAzureData):
+ '''
+ Initializes a certificate setting.
+
+ thumbprint: Specifies the thumbprint of the certificate to be provisioned. The thumbprint must specify an existing service certificate.
+ store_name: Specifies the name of the certificate store from which retrieve certificate.
+ store_location: Specifies the target certificate store location on the virtual machine. The only supported value is LocalMachine.
+ '''
+ def __init__(self, thumbprint='', store_name='', store_location=''):
+ self.thumbprint = thumbprint
+ self.store_name = store_name
+ self.store_location = store_location
+
+class LinuxConfigurationSet(WindowsAzureData):
+ def __init__(self, host_name=None, user_name=None, user_password=None, disable_ssh_password_authentication=None):
+ self.configuration_set_type = 'LinuxProvisioningConfiguration'
+ self.host_name = host_name
+ self.user_name = user_name
+ self.user_password = user_password
+ self.disable_ssh_password_authentication = disable_ssh_password_authentication
+ self.ssh = SSH()
+
+class SSH(WindowsAzureData):
+ def __init__(self):
+ self.public_keys = PublicKeys()
+ self.key_pairs = KeyPairs()
+
+class PublicKeys(WindowsAzureData):
+ def __init__(self):
+ self.public_keys = _list_of(PublicKey)
+
+ def __iter__(self):
+ return iter(self.public_keys)
+
+ def __len__(self):
+ return len(self.public_keys)
+
+ def __getitem__(self, index):
+ return self.public_keys[index]
+
+class PublicKey(WindowsAzureData):
+ def __init__(self):
+ self.finger_print = ''
+ self.path = ''
+
+class KeyPairs(WindowsAzureData):
+ def __init__(self):
+ self.key_pairs = _list_of(KeyPair)
+
+ def __iter__(self):
+ return iter(self.key_pairs)
+
+ def __len__(self):
+ return len(self.key_pairs)
+
+ def __getitem__(self, index):
+ return self.key_pairs[index]
+
+class KeyPair(WindowsAzureData):
+ def __init__(self):
+ self.finger_print = ''
+ self.path = ''
+
+class LoadBalancerProbe(WindowsAzureData):
+ def __init__(self):
+ self.path = ''
+ self.port = ''
+ self.protocol = ''
+
+class DataVirtualHardDisks(WindowsAzureData):
+ def __init__(self):
+ self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)
+
+ def __iter__(self):
+ return iter(self.data_virtual_hard_disks)
+
+ def __len__(self):
+ return len(self.data_virtual_hard_disks)
+
+ def __getitem__(self, index):
+ return self.data_virtual_hard_disks[index]
+
+class DataVirtualHardDisk(WindowsAzureData):
+ def __init__(self):
+ self.host_caching = ''
+ self.disk_label = ''
+ self.disk_name = ''
+ self.lun = 0
+ self.logical_disk_size_in_gb = 0
+ self.media_link = ''
+
+class OSVirtualHardDisk(WindowsAzureData):
+ def __init__(self, source_image_name=None, media_link=None, host_caching=None, disk_label=None, disk_name=None):
+ self.source_image_name = source_image_name
+ self.media_link = media_link
+ self.host_caching = host_caching
+ self.disk_label = disk_label
+ self.disk_name = disk_name
+ self.os = '' # undocumented, not used when adding a role
+
+class AsynchronousOperationResult(WindowsAzureData):
+ def __init__(self, request_id=None):
+ self.request_id = request_id
+
+def _update_management_header(request):
+ ''' Add additional headers for management. '''
+
+ if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
+ request.headers.append(('Content-Length', str(len(request.body))))
+
+ #append additional headers base on the service
+ request.headers.append(('x-ms-version', X_MS_VERSION))
+
+ # if it is not GET or HEAD request, must set content-type.
+    if not request.method in ['GET', 'HEAD']:  # FIXME(review): prefer `request.method not in [...]` per PEP 8
+ for name, value in request.headers:
+ if 'content-type' == name.lower():
+ break
+ else:
+ request.headers.append(('Content-Type', 'application/atom+xml;type=entry;charset=utf-8'))
+
+ return request.headers
+
+def _parse_response_for_async_op(response):
+ ''' Extracts request id from response header. '''
+
+ if response is None:
+ return None
+
+ result = AsynchronousOperationResult()
+ if response.headers:
+ for name, value in response.headers:
+ if name.lower() == 'x-ms-request-id':
+ result.request_id = value
+
+ return result
+
+def _management_error_handler(http_error):
+ ''' Simple error handler for management service. Will add more specific cases '''
+ return _general_error_handler(http_error)
+
+def _lower(text):
+ return text.lower()
+
+class _XmlSerializer(object):
+ @staticmethod
+ def create_storage_service_input_to_xml(service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateStorageServiceInput',
+ [('ServiceName', service_name),
+ ('Description', description),
+ ('Label', label, base64.b64encode),
+ ('AffinityGroup', affinity_group),
+ ('Location', location),
+ ('GeoReplicationEnabled', geo_replication_enabled, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def update_storage_service_input_to_xml(description, label, geo_replication_enabled, extended_properties):
+ return _XmlSerializer.doc_from_data('UpdateStorageServiceInput',
+ [('Description', description),
+ ('Label', label, base64.b64encode),
+ ('GeoReplicationEnabled', geo_replication_enabled, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def regenerate_keys_to_xml(key_type):
+ return _XmlSerializer.doc_from_data('RegenerateKeys',
+ [('KeyType', key_type)])
+
+ @staticmethod
+ def update_hosted_service_to_xml(label, description, extended_properties):
+ return _XmlSerializer.doc_from_data('UpdateHostedService',
+ [('Label', label, base64.b64encode),
+ ('Description', description)],
+ extended_properties)
+
+ @staticmethod
+ def create_hosted_service_to_xml(service_name, label, description, location, affinity_group, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateHostedService',
+ [('ServiceName', service_name),
+ ('Label', label, base64.b64encode),
+ ('Description', description),
+ ('Location', location),
+ ('AffinityGroup', affinity_group)],
+ extended_properties)
+
+ @staticmethod
+ def create_deployment_to_xml(name, package_url, label, configuration, start_deployment, treat_warnings_as_error, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateDeployment',
+ [('Name', name),
+ ('PackageUrl', package_url),
+ ('Label', label, base64.b64encode),
+ ('Configuration', configuration),
+ ('StartDeployment', start_deployment, _lower),
+ ('TreatWarningsAsError', treat_warnings_as_error, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def swap_deployment_to_xml(production, source_deployment):
+ return _XmlSerializer.doc_from_data('Swap',
+ [('Production', production),
+ ('SourceDeployment', source_deployment)])
+
+ @staticmethod
+ def update_deployment_status_to_xml(status):
+ return _XmlSerializer.doc_from_data('UpdateDeploymentStatus',
+ [('Status', status)])
+
+ @staticmethod
+ def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, extended_properties):
+ return _XmlSerializer.doc_from_data('ChangeConfiguration',
+ [('Configuration', configuration),
+ ('TreatWarningsAsError', treat_warnings_as_error, _lower),
+ ('Mode', mode)],
+ extended_properties)
+
+ @staticmethod
+ def upgrade_deployment_to_xml(mode, package_url, configuration, label, role_to_upgrade, force, extended_properties):
+ return _XmlSerializer.doc_from_data('UpgradeDeployment',
+ [('Mode', mode),
+ ('PackageUrl', package_url),
+ ('Configuration', configuration),
+ ('Label', label, base64.b64encode),
+ ('RoleToUpgrade', role_to_upgrade),
+ ('Force', force, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def rollback_upgrade_to_xml(mode, force):
+ return _XmlSerializer.doc_from_data('RollbackUpdateOrUpgrade',
+ [('Mode', mode),
+ ('Force', force, _lower)])
+
+ @staticmethod
+ def walk_upgrade_domain_to_xml(upgrade_domain):
+ return _XmlSerializer.doc_from_data('WalkUpgradeDomain',
+ [('UpgradeDomain', upgrade_domain)])
+
+ @staticmethod
+ def certificate_file_to_xml(data, certificate_format, password):
+ return _XmlSerializer.doc_from_data('CertificateFile',
+ [('Data', data),
+ ('CertificateFormat', certificate_format),
+ ('Password', password)])
+
+ @staticmethod
+ def create_affinity_group_to_xml(name, label, description, location):
+ return _XmlSerializer.doc_from_data('CreateAffinityGroup',
+ [('Name', name),
+ ('Label', label, base64.b64encode),
+ ('Description', description),
+ ('Location', location)])
+
+ @staticmethod
+ def update_affinity_group_to_xml(label, description):
+ return _XmlSerializer.doc_from_data('UpdateAffinityGroup',
+ [('Label', label, base64.b64encode),
+ ('Description', description)])
+
+ @staticmethod
+ def subscription_certificate_to_xml(public_key, thumbprint, data):
+ return _XmlSerializer.doc_from_data('SubscriptionCertificate',
+ [('SubscriptionCertificatePublicKey', public_key),
+ ('SubscriptionCertificateThumbprint', thumbprint),
+ ('SubscriptionCertificateData', data)])
+
+ @staticmethod
+ def os_image_to_xml(label, media_link, name, os):
+ return _XmlSerializer.doc_from_data('OSImage',
+ [('Label', label),
+ ('MediaLink', media_link),
+ ('Name', name),
+ ('OS', os)])
+
+ @staticmethod
+ def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, logical_disk_size_in_gb, media_link, source_media_link):
+ return _XmlSerializer.doc_from_data('DataVirtualHardDisk',
+ [('HostCaching', host_caching),
+ ('DiskLabel', disk_label),
+ ('DiskName', disk_name),
+ ('Lun', lun),
+ ('LogicalDiskSizeInGB', logical_disk_size_in_gb),
+ ('MediaLink', media_link),
+ ('SourceMediaLink', source_media_link)])
+
+ @staticmethod
+ def disk_to_xml(has_operating_system, label, media_link, name, os):
+ return _XmlSerializer.doc_from_data('Disk',
+ [('HasOperatingSystem', has_operating_system, _lower),
+ ('Label', label),
+ ('MediaLink', media_link),
+ ('Name', name),
+ ('OS', os)])
+
+ @staticmethod
+ def restart_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('RestartRoleOperation',
+ 'RestartRoleOperation')
+
+ @staticmethod
+ def shutdown_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('ShutdownRoleOperation',
+ 'ShutdownRoleOperation')
+
+ @staticmethod
+ def start_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('StartRoleOperation',
+ 'StartRoleOperation')
+
+ @staticmethod
+ def windows_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type),
+ ('ComputerName', configuration.computer_name),
+ ('AdminPassword', configuration.admin_password, base64.b64encode),
+ ('ResetPasswordOnFirstLogon', configuration.reset_password_on_first_logon, _lower),
+ ('EnableAutomaticUpdates', configuration.enable_automatic_updates, _lower),
+ ('TimeZone', configuration.time_zone)])
+
+ if configuration.domain_join is not None:
+ xml += ''
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('Domain', configuration.domain_join.credentials.domain),
+ ('Username', configuration.domain_join.credentials.username),
+ ('Password', configuration.domain_join.credentials.password)])
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('JoinDomain', configuration.domain_join.join_domain),
+ ('MachineObjectOU', configuration.domain_join.machine_object_ou)])
+ xml += ''
+ if configuration.stored_certificate_settings is not None:
+ xml += ''
+ for cert in configuration.stored_certificate_settings:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('StoreLocation', cert.store_location),
+ ('StoreName', cert.store_name),
+ ('Thumbprint', cert.thumbprint)])
+ xml += ''
+ xml += ''
+ return xml
+
+ @staticmethod
+ def linux_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type),
+ ('HostName', configuration.host_name),
+ ('UserName', configuration.user_name),
+ ('UserPassword', configuration.user_password),
+ ('DisableSshPasswordAuthentication', configuration.disable_ssh_password_authentication, _lower)])
+
+ if configuration.ssh is not None:
+ xml += ''
+ xml += ''
+ for key in configuration.ssh.public_keys:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ ('Path', key.path)])
+ xml += ''
+ xml += ''
+ xml += ''
+ for key in configuration.ssh.key_pairs:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ ('Path', key.path)])
+ xml += ''
+ xml += ''
+ xml += ''
+ return xml
+
+ @staticmethod
+ def network_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type)])
+ xml += ''
+ for endpoint in configuration.input_endpoints:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('EnableDirectServerReturn', endpoint.enable_direct_server_return, _lower),
+ ('LoadBalancedEndpointSetName', endpoint.load_balanced_endpoint_set_name),
+ ('LocalPort', endpoint.local_port),
+ ('Name', endpoint.name),
+ ('Port', endpoint.port)])
+
+ if endpoint.load_balancer_probe.path or endpoint.load_balancer_probe.port or endpoint.load_balancer_probe.protocol:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('Path', endpoint.load_balancer_probe.path),
+ ('Port', endpoint.load_balancer_probe.port),
+ ('Protocol', endpoint.load_balancer_probe.protocol)])
+ xml += ''
+
+ xml += _XmlSerializer.data_to_xml([('Protocol', endpoint.protocol)])
+ xml += ''
+ xml += ''
+ xml += ''
+ for name in configuration.subnet_names:
+ xml += _XmlSerializer.data_to_xml([('SubnetName', name)])
+ xml += ''
+ return xml
+
+ @staticmethod
+ def role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set):
+ xml = _XmlSerializer.data_to_xml([('RoleName', role_name),
+ ('RoleType', role_type)])
+
+ xml += ''
+
+ if system_configuration_set is not None:
+ xml += ''
+ if isinstance(system_configuration_set, WindowsConfigurationSet):
+ xml += _XmlSerializer.windows_configuration_to_xml(system_configuration_set)
+ elif isinstance(system_configuration_set, LinuxConfigurationSet):
+ xml += _XmlSerializer.linux_configuration_to_xml(system_configuration_set)
+ xml += ''
+
+ if network_configuration_set is not None:
+ xml += ''
+ xml += _XmlSerializer.network_configuration_to_xml(network_configuration_set)
+ xml += ''
+
+ xml += ''
+
+ if availability_set_name is not None:
+ xml += _XmlSerializer.data_to_xml([('AvailabilitySetName', availability_set_name)])
+
+ if data_virtual_hard_disks is not None:
+ xml += ''
+ for hd in data_virtual_hard_disks:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('HostCaching', hd.host_caching),
+ ('DiskLabel', hd.disk_label),
+ ('DiskName', hd.disk_name),
+ ('Lun', hd.lun),
+ ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),
+ ('MediaLink', hd.media_link)])
+ xml += ''
+ xml += ''
+
+ if os_virtual_hard_disk is not None:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('HostCaching', os_virtual_hard_disk.host_caching),
+ ('DiskLabel', os_virtual_hard_disk.disk_label),
+ ('DiskName', os_virtual_hard_disk.disk_name),
+ ('MediaLink', os_virtual_hard_disk.media_link),
+ ('SourceImageName', os_virtual_hard_disk.source_image_name)])
+ xml += ''
+
+ if role_size is not None:
+ xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])
+
+ return xml
+
+ @staticmethod
+ def add_role_to_xml(role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set)
+ return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
+
+ @staticmethod
+ def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, None)
+ return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
+
+ @staticmethod
+ def capture_role_to_xml(post_capture_action, target_image_name, target_image_label, provisioning_configuration):
+ xml = _XmlSerializer.data_to_xml([('OperationType', 'CaptureRoleOperation'),
+ ('PostCaptureAction', post_capture_action)])
+ if provisioning_configuration is not None:
+ xml += ''
+ if isinstance(provisioning_configuration, WindowsConfigurationSet):
+ xml += _XmlSerializer.windows_configuration_to_xml(provisioning_configuration)
+ elif isinstance(provisioning_configuration, LinuxConfigurationSet):
+ xml += _XmlSerializer.linux_configuration_to_xml(provisioning_configuration)
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('TargetImageLabel', target_image_label),
+ ('TargetImageName', target_image_name)])
+ return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
+
+ @staticmethod
+ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.data_to_xml([('Name', deployment_name),
+ ('DeploymentSlot', deployment_slot),
+ ('Label', label, base64.b64encode)])
+ xml += ''
+ xml += ''
+ xml += _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set)
+ xml += ''
+ xml += ''
+ return _XmlSerializer.doc_from_xml('Deployment', xml)
+
+ @staticmethod
+ def data_to_xml(data):
+ '''Creates an xml fragment from the specified data.
+ data: Array of tuples, where first: xml element name
+ second: xml element text
+ third: conversion function
+ '''
+ xml = ''
+ for element in data:
+ name = element[0]
+ val = element[1]
+ if len(element) > 2:
+ converter = element[2]
+ else:
+ converter = None
+
+ if val is not None:
+ if converter is not None:
+ text = converter(str(val))
+ else:
+ text = str(val)
+ xml += ''.join(['<', name, '>', text, '', name, '>'])
+ return xml
+
+ @staticmethod
+ def doc_from_xml(document_element_name, inner_xml):
+ '''Wraps the specified xml in an xml root element with default azure namespaces'''
+ xml = ''.join(['<', document_element_name, ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/windowsazure">'])
+ xml += inner_xml
+ xml += ''.join(['', document_element_name, '>'])
+ return xml
+
+ @staticmethod
+ def doc_from_data(document_element_name, data, extended_properties=None):
+ xml = _XmlSerializer.data_to_xml(data)
+ if extended_properties is not None:
+ xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(extended_properties)
+ return _XmlSerializer.doc_from_xml(document_element_name, xml)
+
+ @staticmethod
+ def extended_properties_dict_to_xml_fragment(extended_properties):
+ xml = ''
+ if extended_properties is not None and len(extended_properties) > 0:
+ xml += ''
+ for key, val in extended_properties.items():
+ xml += ''.join(['', '', str(key), '', '', str(val), '', ''])
+ xml += ''
+ return xml
+
+from azure.servicemanagement.servicemanagementservice import ServiceManagementService
diff --git a/src/azure/servicemanagement/servicemanagementservice.py b/src/azure/servicemanagement/servicemanagementservice.py
new file mode 100644
index 000000000000..c9f287f45a7e
--- /dev/null
+++ b/src/azure/servicemanagement/servicemanagementservice.py
@@ -0,0 +1,1468 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from azure.http.httpclient import _HTTPClient
+from azure.http import HTTPError
+from azure.servicemanagement import *
+from azure.servicemanagement import (_update_management_header,
+ _management_error_handler,
+ _parse_response_for_async_op,
+ _XmlSerializer)
+from azure.http import HTTPRequest
+from azure import (_validate_not_none,
+ _get_request_body, _update_request_uri_query,
+ WindowsAzureError, _parse_response,
+ MANAGEMENT_HOST)
+
+class ServiceManagementService:
+ def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST):
+ self.requestid = None
+ self.subscription_id = subscription_id
+ self.cert_file = cert_file
+ self.host = host
+
+ if not self.cert_file:
+ if os.environ.has_key(AZURE_MANAGEMENT_CERTFILE):
+ self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
+
+ if not self.subscription_id:
+ if os.environ.has_key(AZURE_MANAGEMENT_SUBSCRIPTIONID):
+ self.subscription_id = os.environ[AZURE_MANAGEMENT_SUBSCRIPTIONID]
+
+ if not self.cert_file or not self.subscription_id:
+ raise WindowsAzureError('You need to provide subscription id and certificate file')
+
+ self._httpclient = _HTTPClient(service_instance=self, cert_file=self.cert_file)
+ self._filter = self._httpclient.perform_request
+
+ def with_filter(self, filter):
+ '''Returns a new service which will process requests with the
+ specified filter. Filtering operations can include logging, automatic
+ retrying, etc... The filter is a lambda which receives the HTTPRequest
+ and another lambda. The filter can perform any pre-processing on the
+ request, pass it off to the next lambda, and then perform any post-processing
+ on the response.'''
+ res = ServiceManagementService(self.subscription_id, self.cert_file)
+ old_filter = self._filter
+ def new_filter(request):
+ return filter(request, old_filter)
+
+ res._filter = new_filter
+ return res
+
    def set_proxy(self, host, port):
        '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        host: Address of the proxy server.
        port: Port of the proxy server.
        '''
        self._httpclient.set_proxy(host, port)
+
+ #--Operations for storage accounts -----------------------------------
+ def list_storage_accounts(self):
+ '''
+ Lists the storage accounts available under the current subscription.
+ '''
+ return self._perform_get(self._get_storage_service_path(),
+ StorageServices)
+
+ def get_storage_account_properties(self, service_name):
+ '''
+ Returns system properties for the specified storage account.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path(service_name),
+ StorageService)
+
+ def get_storage_account_keys(self, service_name):
+ '''
+ Returns the primary and secondary access keys for the specified storage account.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path(service_name) + '/keys',
+ StorageService)
+
+ def regenerate_storage_account_keys(self, service_name, key_type):
+ '''
+ Regenerates the primary or secondary access key for the specified storage account.
+
+ service_name: Name of the storage service account.
+ key_type: Specifies which key to regenerate. Valid values are: Primary, Secondary
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('key_type', key_type)
+ return self._perform_post(self._get_storage_service_path(service_name) + '/keys?action=regenerate',
+ _XmlSerializer.regenerate_keys_to_xml(key_type),
+ StorageService)
+
+ def create_storage_account(self, service_name, description, label, affinity_group=None, location=None, geo_replication_enabled=True, extended_properties=None):
+ '''
+ Creates a new storage account in Windows Azure.
+
+ service_name: A name for the storage account that is unique within
+ Windows Azure. Storage account names must be between 3
+ and 24 characters in length and use numbers and
+ lower-case letters only.
+ description: A description for the storage account. The description
+ may be up to 1024 characters in length.
+ label: A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The
+ name can be used identify the storage account for your tracking
+ purposes.
+ affinity_group: The name of an existing affinity group in the
+ specified subscription. You can specify either a
+ location or affinity_group, but not both.
+ location: The location where the storage account is created. You can
+ specify either a location or affinity_group, but not both.
+ geo_replication_enabled: Specifies whether the storage account is
+ created with the geo-replication enabled. If
+ the element is not included in the request
+ body, the default value is true. If set to
+ true, the data in the storage account is
+ replicated across more than one geographic
+ location so as to enable resilience in the
+ face of catastrophic service loss.
+ extended_properties: Dictionary containing name/value pairs of storage
+ account properties. You can have a maximum of 50
+ extended property name/value pairs. The maximum
+ length of the Name element is 64 characters, only
+ alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a
+ letter. The value has a maximum length of 255
+ characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('description', description)
+ _validate_not_none('label', label)
+ if affinity_group is None and location is None:
+ raise WindowsAzureError('location or affinity_group must be specified')
+ if affinity_group is not None and location is not None:
+ raise WindowsAzureError('Only one of location or affinity_group needs to be specified')
+ return self._perform_post(self._get_storage_service_path(),
+ _XmlSerializer.create_storage_service_input_to_xml(service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties),
+ async=True)
+
+ def update_storage_account(self, service_name, description=None, label=None, geo_replication_enabled=None, extended_properties=None):
+ '''
+ Updates the label, the description, and enables or disables the
+ geo-replication status for a storage account in Windows Azure.
+
+ service_name: Name of the storage service account.
+ description: A description for the storage account. The description
+ may be up to 1024 characters in length.
+ label: A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The
+ name can be used identify the storage account for your tracking
+ purposes.
+ geo_replication_enabled: Specifies whether the storage account is
+ created with the geo-replication enabled. If
+ the element is not included in the request
+ body, the default value is true. If set to
+ true, the data in the storage account is
+ replicated across more than one geographic
+ location so as to enable resilience in the
+ face of catastrophic service loss.
+ extended_properties: Dictionary containing name/value pairs of storage
+ account properties. You can have a maximum of 50
+ extended property name/value pairs. The maximum
+ length of the Name element is 64 characters, only
+ alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a
+ letter. The value has a maximum length of 255
+ characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_put(self._get_storage_service_path(service_name),
+ _XmlSerializer.update_storage_service_input_to_xml(description, label, geo_replication_enabled, extended_properties))
+
+ def delete_storage_account(self, service_name):
+ '''
+ Deletes the specified storage account from Windows Azure.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_delete(self._get_storage_service_path(service_name))
+
+
+ def check_storage_account_name_availability(self, service_name):
+ '''
+ Checks to see if the specified storage account name is available, or
+ if it has already been taken.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path() + '/operations/isavailable/' + str(service_name) + '',
+ AvailabilityResponse)
+
+ #--Operations for hosted services ------------------------------------
+ def list_hosted_services(self):
+ '''
+ Lists the hosted services available under the current subscription.
+ '''
+ return self._perform_get(self._get_hosted_service_path(),
+ HostedServices)
+
+ def get_hosted_service_properties(self, service_name, embed_detail=False):
+ '''
+ Retrieves system properties for the specified hosted service. These
+ properties include the service name and service type; the name of the
+ affinity group to which the service belongs, or its location if it is
+ not part of an affinity group; and optionally, information on the
+ service's deployments.
+
+ service_name: Name of the hosted service.
+ embed_detail: When True, the management service returns properties for
+ all deployments of the service, as well as for the
+ service itself.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('embed_detail', embed_detail)
+ return self._perform_get(self._get_hosted_service_path(service_name) + '?embed-detail=' + str(embed_detail).lower(),
+ HostedService)
+
+ def create_hosted_service(self, service_name, label, description=None, location=None, affinity_group=None, extended_properties=None):
+ '''
+ Creates a new hosted service in Windows Azure.
+
+ service_name: A name for the hosted service that is unique within
+ Windows Azure. This name is the DNS prefix name and can
+ be used to access the hosted service.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. The name can be used
+ identify the storage account for your tracking purposes.
+ description: A description for the hosted service. The description can
+ be up to 1024 characters in length.
+ location: The location where the hosted service will be created. You
+ can specify either a location or affinity_group, but not
+ both.
+ affinity_group: The name of an existing affinity group associated with
+ this subscription. This name is a GUID and can be
+ retrieved by examining the name element of the response
+ body returned by list_affinity_groups. You can specify
+ either a location or affinity_group, but not both.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('label', label)
+ if affinity_group is None and location is None:
+ raise WindowsAzureError('location or affinity_group must be specified')
+ if affinity_group is not None and location is not None:
+ raise WindowsAzureError('Only one of location or affinity_group needs to be specified')
+ return self._perform_post(self._get_hosted_service_path(),
+ _XmlSerializer.create_hosted_service_to_xml(service_name, label, description, location, affinity_group, extended_properties))
+
+ def update_hosted_service(self, service_name, label=None, description=None, extended_properties=None):
+ '''
+ Updates the label and/or the description for a hosted service in
+ Windows Azure.
+
+ service_name: Name of the hosted service.
+ label: A name for the hosted service that is base64-encoded. The name
+ may be up to 100 characters in length. You must specify a value
+ for either Label or Description, or for both. It is recommended
+ that the label be unique within the subscription. The name can
+ be used identify the hosted service for your tracking purposes.
+ description: A description for the hosted service. The description may
+ be up to 1024 characters in length. You must specify a
+ value for either Label or Description, or for both.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_put(self._get_hosted_service_path(service_name),
+ _XmlSerializer.update_hosted_service_to_xml(label, description, extended_properties))
+
+ def delete_hosted_service(self, service_name):
+ '''
+ Deletes the specified hosted service from Windows Azure.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_delete(self._get_hosted_service_path(service_name))
+
+ def get_deployment_by_slot(self, service_name, deployment_slot):
+ '''
+ Returns configuration information, status, and system properties for
+ a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ return self._perform_get(self._get_deployment_path_using_slot(service_name, deployment_slot),
+ Deployment)
+
+ def get_deployment_by_name(self, service_name, deployment_name):
+ '''
+ Returns configuration information, status, and system properties for a
+ deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ return self._perform_get(self._get_deployment_path_using_name(service_name, deployment_name),
+ Deployment)
+
+ def create_deployment(self, service_name, deployment_slot, name, package_url, label, configuration, start_deployment=False, treat_warnings_as_error=False, extended_properties=None):
+ '''
+ Uploads a new service package and creates a new deployment on staging
+ or production.
+
+ service_name: Name of the hosted service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ name: The name for the deployment. The deployment name must be unique
+ among other deployments for the hosted service.
+ package_url: A URL that refers to the location of the service package
+ in the Blob service. The service package can be located
+ either in a storage account beneath the same subscription
+ or a Shared Access Signature (SAS) URI from any storage
+ account.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+ used identify the hosted service for your tracking purposes.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ start_deployment: Indicates whether to start the deployment
+ immediately after it is created. If false, the
+ service model is still deployed to the virtual
+ machines but the code is not run immediately.
+ Instead, the service is Suspended until you call
+ Update Deployment Status and set the status to
+ Running, at which time the service will be started.
+ A deployed service still incurs charges, even if it
+ is suspended.
+ treat_warnings_as_error: Indicates whether to treat package validation
+ warnings as errors. If set to true, the
+ Created Deployment operation fails if there
+ are validation warnings on the service package.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ _validate_not_none('name', name)
+ _validate_not_none('package_url', package_url)
+ _validate_not_none('label', label)
+ _validate_not_none('configuration', configuration)
+ return self._perform_post(self._get_deployment_path_using_slot(service_name, deployment_slot),
+ _XmlSerializer.create_deployment_to_xml(name, package_url, label, configuration, start_deployment, treat_warnings_as_error, extended_properties),
+ async=True)
+
+ def delete_deployment(self, service_name, deployment_name):
+ '''
+ Deletes the specified deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ return self._perform_delete(self._get_deployment_path_using_name(service_name, deployment_name),
+ async=True)
+
+ def swap_deployment(self, service_name, production, source_deployment):
+ '''
+ Initiates a virtual IP swap between the staging and production
+ deployment environments for a service. If the service is currently
+ running in the staging environment, it will be swapped to the
+ production environment. If it is running in the production
+ environment, it will be swapped to staging.
+
+ service_name: Name of the hosted service.
+ production: The name of the production deployment.
+ source_deployment: The name of the source deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('production', production)
+ _validate_not_none('source_deployment', source_deployment)
+ return self._perform_post(self._get_hosted_service_path(service_name),
+ _XmlSerializer.swap_deployment_to_xml(production, source_deployment),
+ async=True)
+
+ def change_deployment_configuration(self, service_name, deployment_name, configuration, treat_warnings_as_error=False, mode='Auto', extended_properties=None):
+ '''
+ Initiates a change to the deployment configuration.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ treat_warnings_as_error: Indicates whether to treat package validation
+ warnings as errors. If set to true, the
+ Created Deployment operation fails if there
+ are validation warnings on the service
+ package.
+ mode: If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+ automatically apply the update To each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('configuration', configuration)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=config',
+ _XmlSerializer.change_deployment_to_xml(configuration, treat_warnings_as_error, mode, extended_properties),
+ async=True)
+
+ def update_deployment_status(self, service_name, deployment_name, status):
+ '''
+ Initiates a change in deployment status.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ status: The change to initiate to the deployment status. Possible
+ values include: Running, Suspended
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('status', status)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=status',
+ _XmlSerializer.update_deployment_status_to_xml(status),
+ async=True)
+
+ def upgrade_deployment(self, service_name, deployment_name, mode, package_url, configuration, label, force, role_to_upgrade=None, extended_properties=None):
+ '''
+ Initiates an upgrade.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ mode: If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+              automatically apply the update to each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ package_url: A URL that refers to the location of the service package
+ in the Blob service. The service package can be located
+ either in a storage account beneath the same subscription
+ or a Shared Access Signature (SAS) URI from any storage
+ account.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+               used to identify the hosted service for your tracking purposes.
+        force: Specifies whether the upgrade should proceed even when it will
+               cause local data to be lost from some role instances. True if
+               the upgrade should proceed; otherwise false if the upgrade
+               should fail.
+ role_to_upgrade: The name of the specific role to upgrade.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('mode', mode)
+ _validate_not_none('package_url', package_url)
+ _validate_not_none('configuration', configuration)
+ _validate_not_none('label', label)
+ _validate_not_none('force', force)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=upgrade',
+ _XmlSerializer.upgrade_deployment_to_xml(mode, package_url, configuration, label, role_to_upgrade, force, extended_properties),
+ async=True)
+
+ def walk_upgrade_domain(self, service_name, deployment_name, upgrade_domain):
+ '''
+ Specifies the next upgrade domain to be walked during manual in-place
+ upgrade or configuration change.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ upgrade_domain: An integer value that identifies the upgrade domain
+ to walk. Upgrade domains are identified with a
+ zero-based index: the first upgrade domain has an ID
+ of 0, the second has an ID of 1, and so on.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('upgrade_domain', upgrade_domain)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=walkupgradedomain',
+ _XmlSerializer.walk_upgrade_domain_to_xml(upgrade_domain),
+ async=True)
+
+ def rollback_update_or_upgrade(self, service_name, deployment_name, mode, force):
+ '''
+ Cancels an in progress configuration change (update) or upgrade and
+ returns the deployment to its state before the upgrade or
+ configuration change was started.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ mode: Specifies whether the rollback should proceed automatically.
+ auto - The rollback proceeds without further user input.
+ manual - You must call the Walk Upgrade Domain operation to
+ apply the rollback to each upgrade domain.
+ force: Specifies whether the rollback should proceed even when it will
+ cause local data to be lost from some role instances. True if
+ the rollback should proceed; otherwise false if the rollback
+ should fail.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('mode', mode)
+ _validate_not_none('force', force)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=rollback',
+ _XmlSerializer.rollback_upgrade_to_xml(mode, force),
+ async=True)
+
+ def reboot_role_instance(self, service_name, deployment_name, role_instance_name):
+ '''
+ Requests a reboot of a role instance that is running in a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ role_instance_name: The name of the role instance.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_instance_name', role_instance_name)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/roleinstances/' + str(role_instance_name) + '?comp=reboot',
+ '',
+ async=True)
+
+ def reimage_role_instance(self, service_name, deployment_name, role_instance_name):
+ '''
+ Requests a reimage of a role instance that is running in a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ role_instance_name: The name of the role instance.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_instance_name', role_instance_name)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/roleinstances/' + str(role_instance_name) + '?comp=reimage',
+ '',
+ async=True)
+
+ def check_hosted_service_name_availability(self, service_name):
+ '''
+ Checks to see if the specified hosted service name is available, or if
+ it has already been taken.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/operations/isavailable/' + str(service_name) + '',
+ AvailabilityResponse)
+
+ #--Operations for service certificates -------------------------------
+ def list_service_certificates(self, service_name):
+ '''
+ Lists all of the service certificates associated with the specified
+ hosted service.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates',
+ Certificates)
+
+ def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):
+ '''
+ Returns the public data for the specified X.509 certificate associated
+ with a hosted service.
+
+ service_name: Name of the hosted service.
+ thumbalgorithm: The algorithm for the certificate's thumbprint.
+ thumbprint: The hexadecimal representation of the thumbprint.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('thumbalgorithm', thumbalgorithm)
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates/' + str(thumbalgorithm) + '-' + str(thumbprint) + '',
+ Certificate)
+
+ def add_service_certificate(self, service_name, data, certificate_format, password):
+ '''
+ Adds a certificate to a hosted service.
+
+ service_name: Name of the hosted service.
+ data: The base-64 encoded form of the pfx file.
+ certificate_format: The service certificate format. The only supported
+ value is pfx.
+ password: The certificate password.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('data', data)
+ _validate_not_none('certificate_format', certificate_format)
+ _validate_not_none('password', password)
+ return self._perform_post('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates',
+ _XmlSerializer.certificate_file_to_xml(data, certificate_format, password),
+ async=True)
+
+ def delete_service_certificate(self, service_name, thumbalgorithm, thumbprint):
+ '''
+ Deletes a service certificate from the certificate store of a hosted
+ service.
+
+ service_name: Name of the hosted service.
+ thumbalgorithm: The algorithm for the certificate's thumbprint.
+ thumbprint: The hexadecimal representation of the thumbprint.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('thumbalgorithm', thumbalgorithm)
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_delete('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates/' + str(thumbalgorithm) + '-' + str(thumbprint),
+ async=True)
+
+ #--Operations for management certificates ----------------------------
+ def list_management_certificates(self):
+ '''
+ The List Management Certificates operation lists and returns basic
+ information about all of the management certificates associated with
+ the specified subscription. Management certificates, which are also
+ known as subscription certificates, authenticate clients attempting to
+ connect to resources associated with your Windows Azure subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/certificates',
+ SubscriptionCertificates)
+
+ def get_management_certificate(self, thumbprint):
+ '''
+ The Get Management Certificate operation retrieves information about
+ the management certificate with the specified thumbprint. Management
+ certificates, which are also known as subscription certificates,
+ authenticate clients attempting to connect to resources associated
+ with your Windows Azure subscription.
+
+ thumbprint: The thumbprint value of the certificate.
+ '''
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_get('/' + self.subscription_id + '/certificates/' + str(thumbprint),
+ SubscriptionCertificate)
+
+ def add_management_certificate(self, public_key, thumbprint, data):
+ '''
+ The Add Management Certificate operation adds a certificate to the
+ list of management certificates. Management certificates, which are
+ also known as subscription certificates, authenticate clients
+ attempting to connect to resources associated with your Windows Azure
+ subscription.
+
+ public_key: A base64 representation of the management certificate
+ public key.
+ thumbprint: The thumb print that uniquely identifies the management
+ certificate.
+        data: The certificate's raw data in base-64 encoded .cer format.
+ '''
+ _validate_not_none('public_key', public_key)
+ _validate_not_none('thumbprint', thumbprint)
+ _validate_not_none('data', data)
+ return self._perform_post('/' + self.subscription_id + '/certificates',
+ _XmlSerializer.subscription_certificate_to_xml(public_key, thumbprint, data))
+
+ def delete_management_certificate(self, thumbprint):
+ '''
+ The Delete Management Certificate operation deletes a certificate from
+ the list of management certificates. Management certificates, which
+ are also known as subscription certificates, authenticate clients
+ attempting to connect to resources associated with your Windows Azure
+ subscription.
+
+ thumbprint: The thumb print that uniquely identifies the management certificate.
+ '''
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_delete('/' + self.subscription_id + '/certificates/' + str(thumbprint))
+
+ #--Operations for affinity groups ------------------------------------
+ def list_affinity_groups(self):
+ '''
+ Lists the affinity groups associated with the specified subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/affinitygroups',
+ AffinityGroups)
+
+ def get_affinity_group_properties(self, affinity_group_name):
+ '''
+ Returns the system properties associated with the specified affinity
+ group.
+
+ affinity_group_name: The name of the affinity group.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ return self._perform_get('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name) + '',
+ AffinityGroup)
+
+ def create_affinity_group(self, name, label, location, description=None):
+ '''
+ Creates a new affinity group for the specified subscription.
+
+ name: A name for the affinity group that is unique to the subscription.
+ label: A base-64 encoded name for the affinity group. The name can be
+ up to 100 characters in length.
+ location: The data center location where the affinity group will be
+ created. To list available locations, use the list_location
+ function.
+ description: A description for the affinity group. The description can
+ be up to 1024 characters in length.
+ '''
+ _validate_not_none('name', name)
+ _validate_not_none('label', label)
+ _validate_not_none('location', location)
+ return self._perform_post('/' + self.subscription_id + '/affinitygroups',
+ _XmlSerializer.create_affinity_group_to_xml(name, label, description, location))
+
+ def update_affinity_group(self, affinity_group_name, label, description=None):
+ '''
+ Updates the label and/or the description for an affinity group for the
+ specified subscription.
+
+ affinity_group_name: The name of the affinity group.
+        label: A name for the affinity group specified as a base-64 encoded
+ The label can be up to 100 characters in length.
+ description: A description for the affinity group. The description can
+ be up to 1024 characters in length.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ _validate_not_none('label', label)
+ return self._perform_put('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name),
+ _XmlSerializer.update_affinity_group_to_xml(label, description))
+
+ def delete_affinity_group(self, affinity_group_name):
+ '''
+ Deletes an affinity group in the specified subscription.
+
+ affinity_group_name: The name of the affinity group.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ return self._perform_delete('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name))
+
+ #--Operations for locations ------------------------------------------
+ def list_locations(self):
+ '''
+ Lists all of the data center locations that are valid for your
+ subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/locations',
+ Locations)
+
+ #--Operations for tracking asynchronous requests ---------------------
+ def get_operation_status(self, request_id):
+ '''
+ Returns the status of the specified operation. After calling an
+ asynchronous operation, you can call Get Operation Status to determine
+ whether the operation has succeeded, failed, or is still in progress.
+
+ request_id: The request ID for the request you wish to track.
+ '''
+ _validate_not_none('request_id', request_id)
+ return self._perform_get('/' + self.subscription_id + '/operations/' + str(request_id),
+ Operation)
+
+ #--Operations for retrieving operating system information ------------
+ def list_operating_systems(self):
+ '''
+ Lists the versions of the guest operating system that are currently
+ available in Windows Azure.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/operatingsystems',
+ OperatingSystems)
+
+ def list_operating_system_families(self):
+ '''
+ Lists the guest operating system families available in Windows Azure,
+ and also lists the operating system versions available for each family.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/operatingsystemfamilies',
+ OperatingSystemFamilies)
+
+ #--Operations for retrieving subscription history --------------------
+ def get_subscription(self):
+ '''
+ Returns account and resource allocation information on the specified
+ subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '',
+ Subscription)
+
+ #--Operations for virtual machines -----------------------------------
+ def get_role(self, service_name, deployment_name, role_name):
+ '''
+ Retrieves the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_get(self._get_role_path(service_name, deployment_name, role_name),
+ PersistentVMRole)
+
+ def create_virtual_machine_deployment(self, service_name, deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Provisions a virtual machine based on the supplied configuration.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name for the deployment. The deployment name must
+ be unique among other deployments for the hosted
+ service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+               used to identify the hosted service for your tracking purposes.
+ role_name: The name of the role.
+ system_config: Contains the metadata required to provision a virtual
+ machine from a Windows or Linux OS image. Use an
+ instance of WindowsConfigurationSet or
+ LinuxConfigurationSet.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ _validate_not_none('label', label)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('system_config', system_config)
+ _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)
+ return self._perform_post(self._get_deployment_path_using_name(service_name),
+ _XmlSerializer.virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def add_role(self, service_name, deployment_name, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Adds a virtual machine to an existing deployment.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ system_config: Contains the metadata required to provision a virtual
+ machine from a Windows or Linux OS image. Use an
+ instance of WindowsConfigurationSet or
+ LinuxConfigurationSet.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('system_config', system_config)
+ _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)
+ return self._perform_post(self._get_role_path(service_name, deployment_name),
+ _XmlSerializer.add_role_to_xml(role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def update_role(self, service_name, deployment_name, role_name, os_virtual_hard_disk=None, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Updates the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_put(self._get_role_path(service_name, deployment_name, role_name),
+ _XmlSerializer.update_role_to_xml(role_name, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def delete_role(self, service_name, deployment_name, role_name):
+ '''
+ Deletes the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_delete(self._get_role_path(service_name, deployment_name, role_name),
+ async=True)
+
+ def capture_role(self, service_name, deployment_name, role_name, post_capture_action, target_image_name, target_image_label, provisioning_configuration=None):
+ '''
+ The Capture Role operation captures a virtual machine image to your
+ image gallery. From the captured image, you can create additional
+ customized virtual machines.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ post_capture_action: Specifies the action after capture operation
+ completes. Possible values are: Delete,
+ Reprovision.
+ target_image_name: Specifies the image name of the captured virtual
+ machine.
+ target_image_label: Specifies the friendly name of the captured
+ virtual machine.
+ provisioning_configuration: Use an instance of WindowsConfigurationSet
+ or LinuxConfigurationSet.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('post_capture_action', post_capture_action)
+ _validate_not_none('target_image_name', target_image_name)
+ _validate_not_none('target_image_label', target_image_label)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.capture_role_to_xml(post_capture_action, target_image_name, target_image_label, provisioning_configuration),
+ async=True)
+
+ def start_role(self, service_name, deployment_name, role_name):
+ '''
+ Starts the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.start_role_operation_to_xml(),
+ async=True)
+
+ def restart_role(self, service_name, deployment_name, role_name):
+ '''
+ Restarts the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.restart_role_operation_to_xml(),
+ async=True)
+
+ def shutdown_role(self, service_name, deployment_name, role_name):
+ '''
+ Shuts down the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.shutdown_role_operation_to_xml(),
+ async=True)
+
+ #--Operations for virtual machine images -----------------------------
+ def list_os_images(self):
+ '''
+ Retrieves a list of the OS images from the image repository.
+ '''
+ return self._perform_get(self._get_image_path(),
+ Images)
+
+ def get_os_image(self, image_name):
+ '''
+ Retrieves an OS image from the image repository.
+ '''
+ return self._perform_get(self._get_image_path(image_name),
+ OSImage)
+
+ def add_os_image(self, label, media_link, name, os):
+ '''
+ Adds an OS image that is currently stored in a storage account in your
+ subscription to the image repository.
+
+ label: Specifies the friendly name of the image.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the image is located. The blob
+ location must belong to a storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more virtual machines.
+ os: The operating system type of the OS image. Possible values are:
+ Linux, Windows
+ '''
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_post(self._get_image_path(),
+ _XmlSerializer.os_image_to_xml(label, media_link, name, os),
+ async=True)
+
+ def update_os_image(self, image_name, label, media_link, name, os):
+ '''
+        Updates an OS image that is in your image repository.
+
+ image_name: The name of the image to update.
+ label: Specifies the friendly name of the image to be updated. You
+ cannot use this operation to update images provided by the
+ Windows Azure platform.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the image is located. The blob
+ location must belong to a storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more VM Roles.
+ os: The operating system type of the OS image. Possible values are:
+ Linux, Windows
+ '''
+ _validate_not_none('image_name', image_name)
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_put(self._get_image_path(image_name),
+ _XmlSerializer.os_image_to_xml(label, media_link, name, os),
+ async=True)
+
+ def delete_os_image(self, image_name):
+ '''
+ Deletes the specified OS image from your image repository.
+
+ image_name: The name of the image.
+ '''
+ _validate_not_none('image_name', image_name)
+ return self._perform_delete(self._get_image_path(image_name),
+ async=True)
+
+ #--Operations for virtual machine disks ------------------------------
+ def get_data_disk(self, service_name, deployment_name, role_name, lun):
+ '''
+ Retrieves the specified data disk from a virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: The Logical Unit Number (LUN) for the disk.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_get(self._get_data_disk_path(service_name, deployment_name, role_name, lun),
+ DataVirtualHardDisk)
+
+ def add_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None, source_media_link=None):
+ '''
+ Adds a data disk to a virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through
+ 15.
+ host_caching: Specifies the platform caching behavior of data disk
+                      blob for read/write efficiency. The default value is
+ ReadOnly. Possible values are: None, ReadOnly, ReadWrite
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ disk_label: Specifies the description of the data disk. When you
+ attach a disk, either by directly referencing a media
+ using the MediaLink element or specifying the target disk
+ size, you can use the DiskLabel element to customize the
+ name property of the target data disk.
+ disk_name: Specifies the name of the disk. Windows Azure uses the
+ specified disk to create the data disk for the machine and
+ populates this field with the disk name.
+ logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
+ to be attached to the role. The disk can be
+ created as part of disk attach or create VM
+ role call by specifying the value for this
+ property. Windows Azure creates the empty
+ disk based on size preference and attaches
+ the newly created disk to the Role.
+ source_media_link: Specifies the location of a blob in account storage
+ which is mounted as a data disk when the virtual
+ machine is created.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_post(self._get_data_disk_path(service_name, deployment_name, role_name),
+ _XmlSerializer.data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, logical_disk_size_in_gb, media_link, source_media_link),
+ async=True)
+
+ def update_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, updated_lun=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None):
+ '''
+ Updates the specified data disk attached to the specified virtual
+ machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through
+ 15.
+ host_caching: Specifies the platform caching behavior of data disk
+                      blob for read/write efficiency. The default value is
+ ReadOnly. Possible values are: None, ReadOnly, ReadWrite
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The
+ LUN specifies the slot in which the data drive appears
+ when mounted for usage by the virtual machine. Valid LUN
+ values are 0 through 15.
+ disk_label: Specifies the description of the data disk. When you
+ attach a disk, either by directly referencing a media
+ using the MediaLink element or specifying the target disk
+ size, you can use the DiskLabel element to customize the
+ name property of the target data disk.
+ disk_name: Specifies the name of the disk. Windows Azure uses the
+ specified disk to create the data disk for the machine and
+ populates this field with the disk name.
+ logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
+ to be attached to the role. The disk can be
+ created as part of disk attach or create VM
+ role call by specifying the value for this
+ property. Windows Azure creates the empty
+ disk based on size preference and attaches
+ the newly created disk to the Role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_put(self._get_data_disk_path(service_name, deployment_name, role_name, lun),
+ _XmlSerializer.data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, updated_lun, logical_disk_size_in_gb, media_link, None),
+ async=True)
+
+ def delete_data_disk(self, service_name, deployment_name, role_name, lun):
+ '''
+ Removes the specified data disk from a virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: The Logical Unit Number (LUN) for the disk.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_delete(self._get_data_disk_path(service_name, deployment_name, role_name, lun),
+ async=True)
+
+ #--Operations for virtual machine disks ------------------------------
+ def list_disks(self):
+ '''
+ Retrieves a list of the disks in your image repository.
+ '''
+ return self._perform_get(self._get_disk_path(),
+ Disks)
+
+ def get_disk(self, disk_name):
+ '''
+ Retrieves a disk from your image repository.
+ '''
+ return self._perform_get(self._get_disk_path(disk_name),
+ Disk)
+
+ def add_disk(self, has_operating_system, label, media_link, name, os):
+ '''
+ Adds a disk to the user image repository. The disk can be an OS disk
+ or a data disk.
+
+        has_operating_system: Specifies whether the disk contains an operating
+ system. Only a disk with an operating system
+ installed can be mounted as OS Drive.
+ label: Specifies the description of the disk.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the current
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the disk. Windows Azure uses the name to
+ identify the disk when creating virtual machines from the disk.
+ os: The OS type of the disk. Possible values are: Linux, Windows
+ '''
+ _validate_not_none('has_operating_system', has_operating_system)
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_post(self._get_disk_path(),
+ _XmlSerializer.disk_to_xml(has_operating_system, label, media_link, name, os))
+
+ def update_disk(self, disk_name, has_operating_system, label, media_link, name, os):
+ '''
+ Updates an existing disk in your image repository.
+
+ disk_name: The name of the disk to update.
+        has_operating_system: Specifies whether the disk contains an operating
+ system. Only a disk with an operating system
+ installed can be mounted as OS Drive.
+ label: Specifies the description of the disk.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the current
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the disk. Windows Azure uses the name to
+ identify the disk when creating virtual machines from the disk.
+ os: The OS type of the disk. Possible values are: Linux, Windows
+ '''
+ _validate_not_none('disk_name', disk_name)
+ _validate_not_none('has_operating_system', has_operating_system)
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_put(self._get_disk_path(disk_name),
+ _XmlSerializer.disk_to_xml(has_operating_system, label, media_link, name, os))
+
+ def delete_disk(self, disk_name):
+ '''
+ Deletes the specified data or operating system disk from your image
+ repository.
+
+ disk_name: The name of the disk to delete.
+ '''
+ _validate_not_none('disk_name', disk_name)
+ return self._perform_delete(self._get_disk_path(disk_name))
+
+ #--Helper functions --------------------------------------------------
+ def _perform_request(self, request):
+ try:
+ resp = self._filter(request)
+ except HTTPError as e:
+ return _management_error_handler(e)
+
+ return resp
+
+ def _perform_get(self, path, response_type):
+ request = HTTPRequest()
+ request.method = 'GET'
+ request.host = self.host
+ request.path = path
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ return _parse_response(response, response_type)
+
+ def _perform_put(self, path, body, async=False):
+ request = HTTPRequest()
+ request.method = 'PUT'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_post(self, path, body, response_type=None, async=False):
+ request = HTTPRequest()
+ request.method = 'POST'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if response_type is not None:
+ return _parse_response(response, response_type)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_delete(self, path, async=False):
+ request = HTTPRequest()
+ request.method = 'DELETE'
+ request.host = self.host
+ request.path = path
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _get_path(self, resource, name):
+ path = '/' + self.subscription_id + '/' + resource
+ if name is not None:
+ path += '/' + str(name)
+ return path
+
+ def _get_storage_service_path(self, service_name=None):
+ return self._get_path('services/storageservices', service_name)
+
+ def _get_hosted_service_path(self, service_name=None):
+ return self._get_path('services/hostedservices', service_name)
+
+ def _get_deployment_path_using_slot(self, service_name, slot=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deploymentslots', slot)
+
+ def _get_deployment_path_using_name(self, service_name, deployment_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments', deployment_name)
+
+ def _get_role_path(self, service_name, deployment_name, role_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + deployment_name + '/roles', role_name)
+
+ def _get_role_instance_operations_path(self, service_name, deployment_name, role_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + deployment_name + '/roleinstances', role_name) + '/Operations'
+
+ def _get_data_disk_path(self, service_name, deployment_name, role_name, lun=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + str(deployment_name) + '/roles/' + str(role_name) + '/DataDisks', lun)
+
+ def _get_disk_path(self, disk_name=None):
+ return self._get_path('services/disks', disk_name)
+
+ def _get_image_path(self, image_name=None):
+ return self._get_path('services/images', image_name)
diff --git a/src/azure/storage/__init__.py b/src/azure/storage/__init__.py
index 73eccc93b2f2..a82660709323 100644
--- a/src/azure/storage/__init__.py
+++ b/src/azure/storage/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@
from azure import (_create_entry, METADATA_NS, _parse_response_for_dict,
_get_entry_properties, WindowsAzureError,
_get_child_nodes, _get_child_nodesNS,
- WindowsAzureConflictError,
+ WindowsAzureConflictError, _general_error_handler,
WindowsAzureMissingResourceError, _list_of,
DEV_TABLE_HOST, TABLE_SERVICE_HOST_BASE, DEV_BLOB_HOST,
BLOB_SERVICE_HOST_BASE, DEV_QUEUE_HOST,
@@ -720,12 +720,7 @@ def _convert_xml_to_table(xmlstr):
def _storage_error_handler(http_error):
''' Simple error handler for storage service. Will add more specific cases '''
- if http_error.status == 409:
- raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
- elif http_error.status == 404:
- raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
- else:
- raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
+ return _general_error_handler(http_error)
# make these available just from storage.
from blobservice import BlobService
diff --git a/src/azure/storage/blobservice.py b/src/azure/storage/blobservice.py
index 92186e8638a1..b79dfa420795 100644
--- a/src/azure/storage/blobservice.py
+++ b/src/azure/storage/blobservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +29,6 @@
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class BlobService(_StorageClient):
@@ -39,6 +38,9 @@ class BlobService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = BLOB_SERVICE_HOST_BASE, dev_host = DEV_BLOB_HOST):
+ return super(BlobService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def list_containers(self, prefix=None, marker=None, maxresults=None, include=None):
'''
The List Containers operation returns a list of the containers under the specified account.
@@ -54,7 +56,7 @@ def list_containers(self, prefix=None, marker=None, maxresults=None, include=Non
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -81,7 +83,7 @@ def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -107,7 +109,7 @@ def get_container_properties(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -123,7 +125,7 @@ def get_container_metadata(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -140,7 +142,7 @@ def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -154,7 +156,7 @@ def get_container_acl(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -172,7 +174,7 @@ def set_container_acl(self, container_name, signed_identifiers=None, x_ms_blob_p
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.headers = [('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))]
request.body = _get_request_body(_convert_class_to_xml(signed_identifiers))
@@ -189,7 +191,7 @@ def delete_container(self, container_name, fail_not_exist=False):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -211,7 +213,7 @@ def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None,
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -238,7 +240,7 @@ def set_blob_service_properties(self, storage_service_properties, timeout=None):
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
@@ -256,7 +258,7 @@ def get_blob_service_properties(self, timeout=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -275,7 +277,7 @@ def get_blob_properties(self, container_name, blob_name, x_ms_lease_id=None):
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'HEAD'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -299,7 +301,7 @@ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=properties'
request.headers = [
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
@@ -330,7 +332,7 @@ def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_enco
_validate_not_none('x_ms_blob_type', x_ms_blob_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-blob-type', _str_or_none(x_ms_blob_type)),
@@ -365,7 +367,7 @@ def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
@@ -390,7 +392,7 @@ def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
@@ -412,7 +414,7 @@ def set_blob_metadata(self, container_name, blob_name, x_ms_meta_name_values=Non
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -436,7 +438,7 @@ def lease_blob(self, container_name, blob_name, x_ms_lease_action, x_ms_lease_id
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
@@ -468,7 +470,7 @@ def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, i
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=snapshot'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -515,7 +517,7 @@ def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_
_validate_not_none('x_ms_copy_source', x_ms_copy_source)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
@@ -554,7 +556,7 @@ def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_lease_id=No
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
@@ -580,7 +582,7 @@ def put_block(self, container_name, blob_name, block, blockid, content_md5=None,
_validate_not_none('blockid', blockid)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=block'
request.headers = [
('Content-MD5', _str_or_none(content_md5)),
@@ -624,7 +626,7 @@ def put_block_list(self, container_name, blob_name, block_list, content_md5=None
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [
('Content-MD5', _str_or_none(content_md5)),
@@ -656,7 +658,7 @@ def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [
@@ -697,7 +699,7 @@ def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write,
_validate_not_none('x_ms_page_write', x_ms_page_write)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=page'
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
@@ -736,7 +738,7 @@ def get_page_ranges(self, container_name, blob_name, snapshot=None, range=None,
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=pagelist'
request.headers = [
('Range', _str_or_none(range)),
diff --git a/src/azure/storage/cloudstorageaccount.py b/src/azure/storage/cloudstorageaccount.py
index 39ea96f9331f..ead0928ac99c 100644
--- a/src/azure/storage/cloudstorageaccount.py
+++ b/src/azure/storage/cloudstorageaccount.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/azure/storage/queueservice.py b/src/azure/storage/queueservice.py
index baffc0a005c2..778dcc776f0d 100644
--- a/src/azure/storage/queueservice.py
+++ b/src/azure/storage/queueservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,7 +28,6 @@
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class QueueService(_StorageClient):
@@ -38,6 +37,9 @@ class QueueService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = QUEUE_SERVICE_HOST_BASE, dev_host = DEV_QUEUE_HOST):
+ return super(QueueService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including Windows Azure
@@ -48,7 +50,7 @@ def get_queue_service_properties(self, timeout=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -63,7 +65,7 @@ def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -89,7 +91,7 @@ def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=Fal
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -119,7 +121,7 @@ def delete_queue(self, queue_name, fail_not_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -144,7 +146,7 @@ def get_queue_metadata(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -164,7 +166,7 @@ def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -190,7 +192,7 @@ def put_message(self, queue_name, message_text, visibilitytimeout=None, messaget
_validate_not_none('message_text', message_text)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('visibilitytimeout', _str_or_none(visibilitytimeout)),
@@ -222,7 +224,7 @@ def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('numofmessages', _str_or_none(numofmessages)),
@@ -247,7 +249,7 @@ def peek_messages(self, queue_name, numofmessages=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages?peekonly=true'
request.query = [('numofmessages', _str_or_none(numofmessages))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -269,7 +271,7 @@ def delete_message(self, queue_name, message_id, popreceipt):
_validate_not_none('popreceipt', popreceipt)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [('popreceipt', _str_or_none(popreceipt))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -285,7 +287,7 @@ def clear_messages(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -312,7 +314,7 @@ def update_message(self, queue_name, message_id, message_text, popreceipt, visib
_validate_not_none('visibilitytimeout', visibilitytimeout)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [
('popreceipt', _str_or_none(popreceipt)),
@@ -339,12 +341,10 @@ def set_queue_service_properties(self, storage_service_properties, timeout=None)
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
-
-
diff --git a/src/azure/storage/sharedaccesssignature.py b/src/azure/storage/sharedaccesssignature.py
index a7850702fa5c..c80ce63ba59d 100644
--- a/src/azure/storage/sharedaccesssignature.py
+++ b/src/azure/storage/sharedaccesssignature.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/azure/storage/storageclient.py b/src/azure/storage/storageclient.py
index 862da608b450..1bb259c391e8 100644
--- a/src/azure/storage/storageclient.py
+++ b/src/azure/storage/storageclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
import os
-from azure.storage import _storage_error_handler, X_MS_VERSION
+from azure.storage import _storage_error_handler
from azure.http.httpclient import _HTTPClient
from azure.http import HTTPError
from azure import (_parse_response, WindowsAzureError,
@@ -38,7 +38,7 @@ class _StorageClient(object):
This is the base class for BlobManager, TableManager and QueueManager.
'''
- def __init__(self, account_name=None, account_key=None, protocol='http'):
+ def __init__(self, account_name=None, account_key=None, protocol='http', host_base='', dev_host=''):
if account_name is not None:
self.account_name = account_name.encode('ascii', 'ignore')
else:
@@ -50,6 +50,8 @@ def __init__(self, account_name=None, account_key=None, protocol='http'):
self.requestid = None
self.protocol = protocol
+ self.host_base = host_base
+ self.dev_host = dev_host
#the app is not run in azure emulator or use default development
#storage account and key if app is run in emulator.
@@ -82,8 +84,7 @@ def __init__(self, account_name=None, account_key=None, protocol='http'):
if not self.account_name or not self.account_key:
raise WindowsAzureError(azure._ERROR_STORAGE_MISSING_INFO)
- self.x_ms_version = X_MS_VERSION
- self._httpclient = _HTTPClient(service_instance=self, account_key=self.account_key, account_name=self.account_name, x_ms_version=self.x_ms_version, protocol=protocol)
+ self._httpclient = _HTTPClient(service_instance=self, account_key=self.account_key, account_name=self.account_name, protocol=protocol)
self._batchclient = None
self._filter = self._perform_request_worker
@@ -102,6 +103,16 @@ def new_filter(request):
res._filter = new_filter
return res
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self._httpclient.set_proxy(host, port)
+
+ def _get_host(self):
+ if self.use_local_storage:
+ return self.dev_host
+ else:
+ return self.account_name + self.host_base
+
def _perform_request_worker(self, request):
return self._httpclient.perform_request(request)
diff --git a/src/azure/storage/tableservice.py b/src/azure/storage/tableservice.py
index 9de4858d7b9e..4240009385fc 100644
--- a/src/azure/storage/tableservice.py
+++ b/src/azure/storage/tableservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -32,7 +32,6 @@
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class TableService(_StorageClient):
@@ -42,6 +41,9 @@ class TableService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = TABLE_SERVICE_HOST_BASE, dev_host = DEV_TABLE_HOST):
+ return super(TableService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def begin_batch(self):
if self._batchclient is None:
self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)
@@ -64,7 +66,7 @@ def get_table_service_properties(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -81,7 +83,7 @@ def set_table_service_properties(self, storage_service_properties):
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -99,7 +101,7 @@ def query_tables(self, table_name = None, top=None, next_table_name=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
if table_name is not None:
uri_part_table_name = "('" + table_name + "')"
else:
@@ -127,7 +129,7 @@ def create_table(self, table, fail_on_exist=False):
_validate_not_none('table', table)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/Tables'
request.body = _get_request_body(convert_table_to_xml(table))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -152,7 +154,7 @@ def delete_table(self, table_name, fail_not_exist=False):
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/Tables(\'' + str(table_name) + '\')'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -181,7 +183,7 @@ def get_entity(self, table_name, partition_key, row_key, select=''):
_validate_not_none('select', select)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')?$select=' + str(select) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -201,7 +203,7 @@ def query_entities(self, table_name, filter=None, select=None, top=None, next_pa
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '()'
request.query = [
('$filter', _str_or_none(filter)),
@@ -228,7 +230,7 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + ''
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
@@ -255,7 +257,7 @@ def update_entity(self, table_name, partition_key, row_key, entity, content_type
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -285,7 +287,7 @@ def merge_entity(self, table_name, partition_key, row_key, entity, content_type=
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -315,7 +317,7 @@ def delete_entity(self, table_name, partition_key, row_key, content_type='applic
_validate_not_none('if_match', if_match)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -343,7 +345,7 @@ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, c
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
@@ -371,7 +373,7 @@ def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, con
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
@@ -388,5 +390,3 @@ def _perform_request_worker(self, request):
self.account_key)
request.headers.append(('Authorization', auth))
return self._httpclient.perform_request(request)
-
-
diff --git a/src/build.bat b/src/build.bat
index 17d39bcde4af..b1aa2e403a09 100644
--- a/src/build.bat
+++ b/src/build.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/codegenerator/codegenerator.py b/src/codegenerator/codegenerator.py
index eb6dc6e4d814..e6d5c7b04687 100644
--- a/src/codegenerator/codegenerator.py
+++ b/src/codegenerator/codegenerator.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -714,7 +714,7 @@ def add_license(license_str, output_file_name):
output_file.close()
license_str = '''#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/install.bat b/src/install.bat
index f0a169369c8b..be9180d5b51f 100644
--- a/src/install.bat
+++ b/src/install.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/installfrompip.bat b/src/installfrompip.bat
index 5b5fbfb091d6..c6e711ce418d 100644
--- a/src/installfrompip.bat
+++ b/src/installfrompip.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/setup.py b/src/setup.py
index e40709494cb6..3f38f1e8c237 100644
--- a/src/setup.py
+++ b/src/setup.py
@@ -1,26 +1,29 @@
#!/usr/bin/env python
-#------------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation.
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
-# This source code is subject to terms and conditions of the Apache License,
-# Version 2.0. A copy of the license can be found in the License.html file at
-# the root of this distribution. If you cannot locate the Apache License,
-# Version 2.0, please send an email to vspython@microsoft.com. By using this
-# source code in any fashion, you are agreeing to be bound by the terms of the
-# Apache License, Version 2.0.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# You must not remove this notice, or any other, from this software.
-#------------------------------------------------------------------------------
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
from distutils.core import setup
setup(name='azure',
- version='0.2.3',
+ version='0.6',
description='Windows Azure client APIs',
url='https://github.com/WindowsAzure/azure-sdk-for-python',
packages=['azure',
'azure.http',
'azure.servicebus',
- 'azure.storage']
+ 'azure.storage',
+ 'azure.servicemanagement']
)
diff --git a/src/upload.bat b/src/upload.bat
index 3e953e29013a..cc12d7da0c0d 100644
--- a/src/upload.bat
+++ b/src/upload.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/test/azuretest.pyproj b/test/azuretest.pyproj
index d787ba2083c6..507971137014 100644
--- a/test/azuretest.pyproj
+++ b/test/azuretest.pyproj
@@ -6,7 +6,7 @@
{c0742a2d-4862-40e4-8a28-036eecdbc614}
- azuretest\test_servicebusservice.py
+ azuretest\test_servicemanagementservice.py
.
.
azuretest
@@ -21,7 +21,7 @@
False
9a7a9026-48c1-4688-9d5d-e5699d47d074
2.7
- C:\Users\a-huvalo\Documents\Visual Studio 2010\Projects\PTVS\Open_Source\Incubation\windowsazure\src\
+ ..\src\;..\test\
$/TCWCS/Python/Main/Open_Source/Incubation/windowsazure/test
{4CA58AB2-18FA-4F8D-95D4-32DDF27D184C}
http://tcvstf:8080/tfs/tc
@@ -37,6 +37,7 @@
+
@@ -47,6 +48,7 @@
+
@@ -55,6 +57,8 @@
+
+
diff --git a/test/azuretest/__init__.py b/test/azuretest/__init__.py
index 330ef2588479..356b5e002a1d 100644
--- a/test/azuretest/__init__.py
+++ b/test/azuretest/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/clean.py b/test/azuretest/clean.py
index 76035675e512..164fb886bc77 100644
--- a/test/azuretest/clean.py
+++ b/test/azuretest/clean.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/data/WindowsAzure1.cspkg b/test/azuretest/data/WindowsAzure1.cspkg
new file mode 100644
index 000000000000..385a79d1c417
Binary files /dev/null and b/test/azuretest/data/WindowsAzure1.cspkg differ
diff --git a/test/azuretest/data/test.vhd b/test/azuretest/data/test.vhd
new file mode 100644
index 000000000000..28b20ae4f1bc
Binary files /dev/null and b/test/azuretest/data/test.vhd differ
diff --git a/test/azuretest/doctest_blobservice.py b/test/azuretest/doctest_blobservice.py
index ca1f29456c90..3645ff3d924c 100644
--- a/test/azuretest/doctest_blobservice.py
+++ b/test/azuretest/doctest_blobservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/doctest_queueservice.py b/test/azuretest/doctest_queueservice.py
index 5c1cb4e8cb90..ae0730bd6b47 100644
--- a/test/azuretest/doctest_queueservice.py
+++ b/test/azuretest/doctest_queueservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/doctest_servicebusservicequeue.py b/test/azuretest/doctest_servicebusservicequeue.py
index 984170d68d90..408beaae6cd3 100644
--- a/test/azuretest/doctest_servicebusservicequeue.py
+++ b/test/azuretest/doctest_servicebusservicequeue.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/doctest_servicebusservicetopic.py b/test/azuretest/doctest_servicebusservicetopic.py
index d6f502c51542..ad9b1908979b 100644
--- a/test/azuretest/doctest_servicebusservicetopic.py
+++ b/test/azuretest/doctest_servicebusservicetopic.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/doctest_tableservice.py b/test/azuretest/doctest_tableservice.py
index b93b0274ae74..d98e9866b41a 100644
--- a/test/azuretest/doctest_tableservice.py
+++ b/test/azuretest/doctest_tableservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,25 +24,25 @@
How to Add an Entity to a Table
-------------------------------
>>> task = {'PartitionKey': 'tasksSeattle', 'RowKey': '1', 'description' : 'Take out the trash', 'priority' : 200}
->>> table_service.insert_entity('tasktable', task)
+>>> entity = table_service.insert_entity('tasktable', task)
>>> task = Entity()
>>> task.PartitionKey = 'tasksSeattle'
>>> task.RowKey = '2'
>>> task.description = 'Wash the car'
>>> task.priority = 100
->>> table_service.insert_entity('tasktable', task)
+>>> entity = table_service.insert_entity('tasktable', task)
How to Update an Entity
-----------------------
>>> task = {'description' : 'Take out the garbage', 'priority' : 250}
->>> table_service.update_entity('tasktable', 'tasksSeattle', '1', task)
+>>> entity = table_service.update_entity('tasktable', 'tasksSeattle', '1', task)
>>> task = {'description' : 'Take out the garbage again', 'priority' : 250}
->>> table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '1', task)
+>>> entity = table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '1', task)
>>> task = {'description' : 'Buy detergent', 'priority' : 300}
->>> table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '3', task)
+>>> entity = table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '3', task)
How to Change a Group of Entities
diff --git a/test/azuretest/test_blobservice.py b/test/azuretest/test_blobservice.py
index a04e5fca19c1..087068c67c5b 100644
--- a/test/azuretest/test_blobservice.py
+++ b/test/azuretest/test_blobservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +29,11 @@ def setUp(self):
self.bc = BlobService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.bc.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
container_base_name = u'mytestcontainer%s' % (__uid)
diff --git a/test/azuretest/test_cloudstorageaccount.py b/test/azuretest/test_cloudstorageaccount.py
index 198a61c251a7..01eed4a2af8a 100644
--- a/test/azuretest/test_cloudstorageaccount.py
+++ b/test/azuretest/test_cloudstorageaccount.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/test_queueservice.py b/test/azuretest/test_queueservice.py
index a89cdbba2a1d..c058b6c8a364 100644
--- a/test/azuretest/test_queueservice.py
+++ b/test/azuretest/test_queueservice.py
@@ -1,6 +1,5 @@
-
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +28,11 @@ def setUp(self):
self.queue_client = QueueService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.queue_client.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
queue_base_name = u'%s' % (__uid)
diff --git a/test/azuretest/test_servicebusservice.py b/test/azuretest/test_servicebusservice.py
index 7ff352e11b08..d5a6ca0dd053 100644
--- a/test/azuretest/test_servicebusservice.py
+++ b/test/azuretest/test_servicebusservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -26,6 +26,11 @@ def setUp(self):
credentials.getServiceBusKey(),
'owner')
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.sbs.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
queue_base_name = u'mytestqueue%s' % (__uid)
diff --git a/test/azuretest/test_servicemanagementservice.py b/test/azuretest/test_servicemanagementservice.py
new file mode 100644
index 000000000000..d3382dae9c5d
--- /dev/null
+++ b/test/azuretest/test_servicemanagementservice.py
@@ -0,0 +1,1754 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+from azure import *
+from azure.servicemanagement import *
+from azure.storage.blobservice import *
+from azuretest.util import *
+
+import unittest
+import base64
+
+MANAGEMENT_CERT_PUBLICKEY = 'MIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQAB'
+MANAGEMENT_CERT_DATA = 'MIIC9jCCAeKgAwIBAgIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAMBUxEzARBgNVBAMTClB5dGhvblRlc3QwHhcNMTIwODMwMDAyNTMzWhcNMzkxMjMxMjM1OTU5WjAVMRMwEQYDVQQDEwpQeXRob25UZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQABo0owSDBGBgNVHQEEPzA9gBBS6knRHo54LppngxVCCzZVoRcwFTETMBEGA1UEAxMKUHl0aG9uVGVzdIIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAA4IBAQAnZbP3YV+08wI4YTg6MOVA+j1njd0kVp35FLehripmaMNE6lgk3Vu1MGGl0JnvMr3fNFGFzRske/jVtFxlHE5H/CoUzmyMQ+W06eV/e995AduwTKsS0ZgYn0VoocSXWst/nyhpKOcbJgAOohOYxgsGI1JEqQgjyeqzcCIhw/vlWiA3V8bSiPnrC9vwhH0eB025hBd2VbEGDz2nWCYkwtuOLMTvkmLi/oFw3GOfgagZKk8k/ZPffMCafz+yR3vb1nqAjncrVcJLI8amUfpxhjZYexo8MbxBA432M6w8sjXN+uLCl7ByWZ4xs4vonWgkmjeObtU37SIzolHT4dxIgaP2'
+
+SERVICE_CERT_FORMAT = 'pfx'
+SERVICE_CERT_PASSWORD = 'Python'
+SERVICE_CERT_DATA = 'MIIJ7AIBAzCCCagGCSqGSIb3DQEHAaCCCZkEggmVMIIJkTCCBfoGCSqGSIb3DQEHAaCCBesEggXnMIIF4zCCBd8GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAhxOU59DvbmnAICB9AEggTYNM2UfOCtA1G0fhKNmu79z8/yUm5ybh5JamZqZ4Ra21wTc1khmVmWr0OAYhttaKtqtHfyFv7UY/cojg+fdOPCI+Fa8qQI7oXGEU7hS4O7VH3R/bDESctPB4TRdhjb88hLC+CdQc64PwjHFoaUHEQHFMsi7ujbi1u4Xg8YRqg4eKoG0AAraEQgyS3+1oWndtUOdqvOAsAG/bshiK47pgxMTgHpYjtOMtjcPqrwYq5aZQNWdJMXjl4JnmGJpO1dGqlSyr3uJuPobuq18diFS+JMJk/nQt50GF/SkscQn3TCLc6g6AjuKqdnSQTM34eNkZanKyyBuRmVUvM+zcKP6riiRDB86wrfNcT3sPDh9x6BSiTaxWKDk4IziWUMy8WJ/qItaVm2klIyez9JeEgcN2PhI2B1SFxH2qliyCmJ+308RFJHlQZDNZhpTRNgkulYfiswr5xOVEcU7J6eithmmD72xANSiiTbtFH10Bu10FN4SbSvOYQiGIjDVG4awAPVC9gURm88PciIimz1ne0WN3Ioj92BTC78kNoMI7+NDiVV01W+/CNK8J1WCTkKWRxTui8Ykm2z63gh9KmSZyEstFDFIz2WbJEKM8N4vjzGpNhRYOHpxFaCm2E/yoNj4MyHmo9XGtYsqhA0Jy12Wmx/fVGeZb3Az8Y5MYCQasc9XwvzACf2+RKsz6ey7jTb+Exo0gQB13PNFLEs83R57bDa8vgQupYBFcsamw/RvqmXn8sGw53kd71VVElrfaCNluvAFrLPdaH3F/+J8KHdV7Xs9A1ITvgpHbw2BnQBPwH3pSXZYh5+7it6WSNIHbv8h33Ue+vPLby5Huhg86R4nZkjJbeQXsfVpvC+llhOBHUX+UJth76a/d0iAewPO90rDNx+Nqff+Q7hPoUgxE8HtrbhZNY3qNFfyRGLbCZJpb+6DE7WsDSogFE5gY7gnmJwtT+FBlIocysaBn1NMH8fo/2zyuAOUfjHvuIR+K/NzcMdn5WL7bYjmvJwRIAaPScZV56NzNbZdHsHAU2ujvE+sGNmwr4wz3Db6Q9VfzkNWEzDmRlYEsRYNqQ/E7O2KQWETzZSGTEXgz57APE0d/cOprX+9PXZTdqqjOCU12YLtJobIcBZz+AFPMJRlY+pjuIu8wTzbWX7yoek3zmN9iZAZT5gNYCwIwo06Of2gvgssQ4X53QmJc/oD6WSyZpcS67JOQ8bHXIT1Lg9FBAfgXWEQ+BwIBK1SEJYlZJm0JkJ3Og7t3rgAmuv5YOfbFLo484946izfQeoUF5qrn/qSiqNOnYNMLvaXWT2pWE9V6u8max0l5dA5qNR772ahMQEH1iZu/K8gKfQ/z6Ea1yxFVwGtf9uNSuvS2M3MFa4Dos8FtxxQgOIEoiV4qc2yQIyiAKYusRI+K3PMnqSyg9S3eh0LCbuI8CYESpolrFCMyNFSwJpM+pUDA5GkRM/gYGLAhtZtLxgZBZYn81DgiRmk4igRIjNKWcy5l0eWN5KPBQve0QVXFB9z0A2GqOGEHJTZS5rww61hVaNyp2nBa8Mrd9afnogoEcb1SBRsU5QTsP91XGj8zdljL2t+jJDNUxi6nbNQN6onRY1ewpdCKxFzFyR/75nrEPBd8UrDTZ7k/FcNxIlAA2KPH2Dt3r8EZfEKDGBzTATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IAGUANAA1ADcAOQAyAGYAYQAtAGUAMQA3AGUALQA0ADEAMgAzAC0AOQBiAGYANwAtADEAZQBjADkAMQA4ADMAOQAxAGIAOAAxMF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABvAGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIwggOPBgkqhkiG9w0BBwagggOAMIIDfAIBADCCA3UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECLA43UrS9nGWAgIH0ICCA0isAHOSVK2C8XAZpu2dTiJfB51QqgbUuZ4QdPu+INKT3x5x775SMC2wbFEjvjhA3hys6D/ALV4q97JpKc6YUDZMP4zl2yYx6Pr6chTudRCwlrAKqk0Sp0IBZrxZBVBgRsz9pt3VRR9bI9ElHD8j/ahZ+Hx+mxlfUePrabOqlzw9FVmrqBIhhmAs9Ax0l5mvY3p7ww1Vm0K2sVdOZdsKx27Cf7rg4rC6JJ3tPvTfJDUkTCPFgFtam+vZSiMoYbz00Kj2uPBJbkpG2ngjK8ONHzWq8PF6K6Feut5vrjeswR/bm9gGPtrjAU0qBuP5YfJqei6zvs+hXzYOcnnhxFlfHz/QvVJM9losSm17kq0SSqG4HD1XF6C6eiH3pySa2mnw3kEivulBYFUO2jmSGroNlwz6/LVoM+801h0vJayFxP7xRntQr0z5agzyNfCZ8249dgJ4y2UJmSRArdv5h+gYXIra2pNRHVUfPFTIZw3Yf5Uhz83ta3JxIM0BCtwQBsWpJSs3q9tokLQa/wJY6Qj5pVw3pxv+497DrOVCiCwAI3GVTa0QylscKFMnEjxIpYCLDNnY0fRXDYA94AfhDkdjlXLMFZLuwRrfTHqfyaDuFdq9cT2FuhM1J73reMriMGfu+UzTTWd4UZa/mGGRZM9eWvrIvgkvLQr+T250wa7igbJwh3FXRm7TqZSkLOpW3p+Losw0GJIz2k5DW61gkPYY0hMwzpniDrN8pc5BCo8Wtb4UBfW5+J5oQn2oKj2B3BuflL+jgYjXb6YRe1TTstJWmTR4/CrZc2ecNHTMGYlr7bOptaGcw9z/JaCjdoElUNSITVj6TQCa//jko+tdbM1cCtzE7Ty8ARs2XghxbhgLV5KyYZ0q06/tYvaT0vx4PZi64X1weIEmcHJRgdz9dC3+8SrtABoxxft9MD7DvtRNcWiZ+qdKfKEsGgZXYAPgYg/xObaiR9Sz2QGYv1BqoNAtalJLscn7UmGZnzjgyvD3GpvxPnZIZr3pAAyWZKUsL7eFCDjwJu/DlUni31ZI0sNJvcJZkWl5gGtuoTf3q4v80wKlNFVsUCrWRosITNlQun8Q+0NR6MZp8vvMKfRnJr7CkcZOAa7rzZjGF+EwOzAfMAcGBSsOAwIaBBQyyvu2Rm6lFW3e9sQk83bjO1g2pAQU8PYpZ4LXqCe9cmNgCFNqmt4fCOQCAgfQ'
+SERVICE_CERT_DATA_PUBLIC = 'MIIC9jCCAeKgAwIBAgIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAMBUxEzARBgNVBAMTClB5dGhvblRlc3QwHhcNMTIwODMwMDAyNTMzWhcNMzkxMjMxMjM1OTU5WjAVMRMwEQYDVQQDEwpQeXRob25UZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQABo0owSDBGBgNVHQEEPzA9gBBS6knRHo54LppngxVCCzZVoRcwFTETMBEGA1UEAxMKUHl0aG9uVGVzdIIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAA4IBAQAnZbP3YV+08wI4YTg6MOVA+j1njd0kVp35FLehripmaMNE6lgk3Vu1MGGl0JnvMr3fNFGFzRske/jVtFxlHE5H/CoUzmyMQ+W06eV/e995AduwTKsS0ZgYn0VoocSXWst/nyhpKOcbJgAOohOYxgsGI1JEqQgjyeqzcCIhw/vlWiA3V8bSiPnrC9vwhH0eB025hBd2VbEGDz2nWCYkwtuOLMTvkmLi/oFw3GOfgagZKk8k/ZPffMCafz+yR3vb1nqAjncrVcJLI8amUfpxhjZYexo8MbxBA432M6w8sjXN+uLCl7ByWZ4xs4vonWgkmjeObtU37SIzolHT4dxIgaP2'
+SERVICE_CERT_THUMBPRINT = 'BEA4B74BD6B915E9DD6A01FB1B8C3C1740F517F2'
+SERVICE_CERT_THUMBALGO = 'sha1'
+
+DEPLOYMENT_ORIGINAL_CONFIG = '''<ServiceConfiguration serviceName="WindowsAzure1" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*" schemaVersion="2012-05.1.7">
+  <Role name="WorkerRole1">
+    <Instances count="2" />
+    <ConfigurationSettings>
+      <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" />
+    </ConfigurationSettings>
+  </Role>
+</ServiceConfiguration>'''
+
+DEPLOYMENT_UPDATE_CONFIG = '''<ServiceConfiguration serviceName="WindowsAzure1" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*" schemaVersion="2012-05.1.7">
+  <Role name="WorkerRole1">
+    <Instances count="4" />
+    <ConfigurationSettings>
+      <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" />
+    </ConfigurationSettings>
+  </Role>
+</ServiceConfiguration>'''
+
+CSPKG_PATH = 'azuretest/data/WindowsAzure1.cspkg'
+DATA_VHD_PATH = 'azuretest/data/test.vhd'
+
+LINUX_IMAGE_NAME = 'OpenLogic__OpenLogic-CentOS-62-20120531-en-us-30GB.vhd'
+WINDOWS_IMAGE_NAME = 'MSFT__Win2K8R2SP1-Datacenter-201208.01-en.us-30GB.vhd'
+
+# This blob must be created manually before running the unit tests;
+# it must be present in the storage account listed in the credentials file.
+LINUX_OS_VHD_URL = credentials.getLinuxOSVHD()
+
+# The easiest way to create a Linux OS vhd is to use the Azure management
+# portal to create a Linux VM, and have it store the VHD in the
+# storage account listed in the credentials file. Then stop the VM,
+# and use the following code to copy the VHD to another blob (if you
+# try to use the VM's VHD directly without making a copy, you will get
+# conflict errors).
+
+#sourceblob = '/%s/%s/%s' % (credentials.getStorageServicesName(), 'vhdcontainername', 'vhdblobname')
+#self.bc.copy_blob('vhdcontainername', 'targetvhdblobname', sourceblob)
+
+
+#------------------------------------------------------------------------------
+class ServiceManagementServiceTest(AzureTestCase):
+
+ def setUp(self):
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+
+ self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
+ if proxy_host:
+ self.sms.set_proxy(proxy_host, proxy_port)
+
+ self.bc = BlobService(account_name=credentials.getStorageServicesName(), account_key=credentials.getStorageServicesKey())
+ if proxy_host:
+ self.bc.set_proxy(proxy_host, proxy_port)
+
+ self.affinity_group_name = getUniqueNameBasedOnCurrentTime('utaffgrp')
+ self.management_certificate_name = getUniqueNameBasedOnCurrentTime('utmgmtcert')
+ self.hosted_service_name = getUniqueNameBasedOnCurrentTime('utsvc')
+ self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')
+ self.container_name = getUniqueNameBasedOnCurrentTime('utctnr')
+ self.disk_name = getUniqueNameBasedOnCurrentTime('utdisk')
+ self.os_image_name = getUniqueNameBasedOnCurrentTime('utosimg')
+
+ self.data_disk_info = None
+
+ def tearDown(self):
+ if self.data_disk_info is not None:
+ try:
+ disk = self.sms.get_data_disk(self.data_disk_info[0], self.data_disk_info[1], self.data_disk_info[2], self.data_disk_info[3])
+ try:
+ result = self.sms.delete_data_disk(self.data_disk_info[0], self.data_disk_info[1], self.data_disk_info[2], self.data_disk_info[3])
+ self._wait_for_async(result.request_id)
+ except: pass
+ try:
+ self.sms.delete_disk(disk.disk_name)
+ except: pass
+ except: pass
+
+ disk_names = [self.disk_name]
+
+ try:
+ # Can't delete a hosted service if it has deployments, so delete those first
+ props = self.sms.get_hosted_service_properties(self.hosted_service_name, True)
+ for deployment in props.deployments:
+ try:
+ for role in deployment.role_list:
+ role_props = self.sms.get_role(self.hosted_service_name, deployment.name, role.role_name)
+ if role_props.os_virtual_hard_disk.disk_name not in disk_names:
+ disk_names.append(role_props.os_virtual_hard_disk.disk_name)
+ except: pass
+
+ try:
+ result = self.sms.delete_deployment(self.hosted_service_name, deployment.name)
+ self._wait_for_async(result.request_id)
+ except: pass
+ self.sms.delete_hosted_service(self.hosted_service_name)
+ except: pass
+
+ try:
+ self.sms.delete_storage_account(self.storage_account_name)
+ except: pass
+
+ try:
+ self.sms.delete_affinity_group(self.affinity_group_name)
+ except: pass
+
+ try:
+ self.sms.delete_management_certificate(self.management_certificate_name)
+ except: pass
+
+ try:
+ result = self.sms.delete_os_image(self.os_image_name)
+ self._wait_for_async(result.request_id)
+ except: pass
+
+ for disk_name in disk_names:
+ try:
+ self.sms.delete_disk(disk_name)
+ except: pass
+
+ try:
+ self.bc.delete_container(self.container_name)
+ except: pass
+
+ #--Helpers-----------------------------------------------------------------
+ def _wait_for_async(self, request_id):
+ result = self.sms.get_operation_status(request_id)
+ while result.status == 'InProgress':
+ time.sleep(5)
+ result = self.sms.get_operation_status(request_id)
+ self.assertEqual(result.status, 'Succeeded')
+
+ def _wait_for_deployment_status(self, service_name, deployment_name, status):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while props.status != status:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _wait_for_role_instance_status(self, service_name, deployment_name, role_instance_name, status):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while self._get_role_instance_status(props, role_instance_name) != status:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _wait_for_rollback_allowed(self, service_name, deployment_name):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while props.rollback_allowed == False:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _get_role_instance_status(self, deployment, role_instance_name):
+ for role_instance in deployment.role_instance_list:
+ if role_instance.instance_name == role_instance_name:
+ return role_instance.instance_status
+ return None
+
+ def _create_hosted_service(self, name):
+ result = self.sms.create_hosted_service(name, name + 'label', name + 'description', 'West US', None, {'ext1':'val1', 'ext2':42})
+ self.assertIsNone(result)
+
+ def _hosted_service_exists(self, name):
+ try:
+ props = self.sms.get_hosted_service_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _create_service_certificate(self, service_name, data, format, password):
+ result = self.sms.add_service_certificate(service_name, data, format, password)
+ self._wait_for_async(result.request_id)
+
+ def _service_certificate_exists(self, service_name, thumbalgorithm, thumbprint):
+ try:
+ props = self.sms.get_service_certificate(service_name, thumbalgorithm, thumbprint)
+ return props is not None
+ except:
+ return False
+
+ def _deployment_exists(self, service_name, deployment_name):
+ try:
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ return props is not None
+ except:
+ return False
+
+ def _create_container_and_block_blob(self, container_name, blob_name, blob_data):
+ self.bc.create_container(container_name, None, 'container', False)
+ resp = self.bc.put_blob(container_name, blob_name, blob_data, 'BlockBlob')
+ self.assertIsNone(resp)
+
+ def _create_container_and_page_blob(self, container_name, blob_name, content_length):
+ self.bc.create_container(container_name, None, 'container', False)
+ resp = self.bc.put_blob(container_name, blob_name, '', 'PageBlob', x_ms_blob_content_length=str(content_length))
+ self.assertIsNone(resp)
+
+ def _upload_file_to_block_blob(self, file_path, blob_name):
+ data = open(file_path, 'rb').read()
+ url = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + blob_name
+ self._create_container_and_block_blob(self.container_name, blob_name, data)
+ return url
+
+ def _upload_chunks(self, file_path, blob_name, chunk_size):
+ index = 0
+ with open(file_path, 'rb') as f:
+ while True:
+ data = f.read(chunk_size)
+ if data:
+ length = len(data)
+ self.bc.put_page(self.container_name, blob_name, data, 'bytes=' + str(index) + '-' + str(index + length - 1), 'update')
+ index += length
+ else:
+ break
+
+ def _upload_file_to_page_blob(self, file_path, blob_name):
+ url = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + blob_name
+ content_length = os.path.getsize(file_path)
+ self._create_container_and_page_blob(self.container_name, blob_name, content_length)
+ self._upload_chunks(file_path, blob_name, 262144)
+ return url
+
+ def _upload_default_package_to_storage_blob(self, blob_name):
+ return self._upload_file_to_block_blob(CSPKG_PATH, blob_name)
+
+ def _upload_disk_to_storage_blob(self, blob_name):
+ return self._upload_file_to_page_blob(DATA_VHD_PATH, blob_name)
+
+ def _add_deployment(self, service_name, deployment_name, deployment_slot='Production'):
+ configuration = base64.b64encode(DEPLOYMENT_ORIGINAL_CONFIG)
+ package_url = self._upload_default_package_to_storage_blob(deployment_name + 'Blob')
+ result = self.sms.create_deployment(service_name, deployment_slot, deployment_name, package_url, deployment_name + 'label', configuration, False, False, { 'dep1':'val1', 'dep2':'val2'})
+ self._wait_for_async(result.request_id)
+
+ def _create_hosted_service_with_deployment(self, service_name, deployment_name):
+ self._create_hosted_service(service_name)
+ self._add_deployment(service_name, deployment_name)
+
+ def _create_affinity_group(self, name):
+ result = self.sms.create_affinity_group(name, 'tstmgmtaffgrp', 'West US', 'tstmgmt affinity group')
+ self.assertIsNone(result)
+
+ def _affinity_group_exists(self, name):
+ try:
+ props = self.sms.get_affinity_group_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _create_management_certificate(self, thumbprint):
+ result = self.sms.add_management_certificate(MANAGEMENT_CERT_PUBLICKEY, thumbprint, MANAGEMENT_CERT_DATA)
+ self.assertIsNone(result)
+
+ def _management_certificate_exists(self, thumbprint):
+ try:
+ props = self.sms.get_management_certificate(thumbprint)
+ return props is not None
+ except:
+ return False
+
+ def _create_storage_account(self, name):
+ result = self.sms.create_storage_account(name, name + 'description', name + 'label', None, 'West US', False, {'ext1':'val1', 'ext2':42})
+ self._wait_for_async(result.request_id)
+
+ def _storage_account_exists(self, name):
+ try:
+ props = self.sms.get_storage_account_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _role_exists(self, service_name, deployment_name, role_name):
+ try:
+ props = self.sms.get_role(service_name, deployment_name, role_name)
+ return props is not None
+ except:
+ return False
+
+ def _create_disk(self, disk_name, os, url):
+ result = self.sms.add_disk(False, disk_name, url, disk_name, os)
+ self.assertIsNone(result)
+
+ def _disk_exists(self, disk_name):
+ try:
+ disk = self.sms.get_disk(disk_name)
+ return disk is not None
+ except:
+ return False
+
+ def _create_os_image(self, name, blob_url, os):
+ result = self.sms.add_os_image(name + 'label', blob_url, name, os)
+ self._wait_for_async(result.request_id)
+
+ def _os_image_exists(self, image_name):
+ try:
+ image = self.sms.get_os_image(image_name)
+ return image is not None
+ except:
+ return False
+
+ def _blob_exists(self, container_name, blob_name):
+ try:
+ props = self.bc.get_blob_properties(container_name, blob_name)
+ return props is not None
+ except:
+ return False
+
+ def _data_disk_exists(self, service_name, deployment_name, role_name, lun):
+ try:
+ props = self.sms.get_data_disk(service_name, deployment_name, role_name, lun)
+ return props is not None
+ except:
+ return False
+
+ def _add_data_disk_from_blob_url(self, service_name, deployment_name, role_name, lun, label):
+ url = self._upload_disk_to_storage_blob('disk')
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, label, None, None, url)
+ self._wait_for_async(result.request_id)
+
+ def _create_vm_linux(self, service_name, deployment_name, role_name, target_container_name, target_blob_name):
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+ system = LinuxConfigurationSet('computername', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+ os_hd = OSVirtualHardDisk(image_name, media_link, disk_label = target_blob_name)
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('utendpoint', 'tcp', '59913', '3394'))
+
+ self._create_hosted_service(service_name)
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ def _create_vm_windows(self, service_name, deployment_name, role_name, target_container_name, target_blob_name):
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+ system = WindowsConfigurationSet('computername', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+ os_hd = OSVirtualHardDisk(image_name, media_link, disk_label = target_blob_name)
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('utendpoint', 'tcp', '59913', '3394'))
+
+ self._create_hosted_service(service_name)
+ self._create_service_certificate(service_name, SERVICE_CERT_DATA, 'pfx', SERVICE_CERT_PASSWORD)
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ def _add_role_windows(self, service_name, deployment_name, role_name2):
+ image_name = WINDOWS_IMAGE_NAME
+ target_container_name = 'vhds'
+ target_blob_name = role_name2 + '.vhd'
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+
+ system = WindowsConfigurationSet('computer2', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ #--Test cases for storage accounts -----------------------------------
+ def test_list_storage_accounts(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.list_storage_accounts()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ storage = None
+ for temp in result:
+ if temp.service_name == self.storage_account_name:
+ storage = temp
+ break
+
+ self.assertIsNotNone(storage)
+ self.assertIsNotNone(storage.service_name)
+ self.assertIsNone(storage.storage_service_keys)
+ self.assertIsNotNone(storage.storage_service_properties)
+ self.assertIsNotNone(storage.storage_service_properties.affinity_group)
+ self.assertIsNotNone(storage.storage_service_properties.description)
+ self.assertIsNotNone(storage.storage_service_properties.geo_primary_region)
+ self.assertIsNotNone(storage.storage_service_properties.geo_replication_enabled)
+ self.assertIsNotNone(storage.storage_service_properties.geo_secondary_region)
+ self.assertIsNotNone(storage.storage_service_properties.label)
+ self.assertIsNotNone(storage.storage_service_properties.last_geo_failover_time)
+ self.assertIsNotNone(storage.storage_service_properties.location)
+ self.assertIsNotNone(storage.storage_service_properties.status)
+ self.assertIsNotNone(storage.storage_service_properties.status_of_primary)
+ self.assertIsNotNone(storage.storage_service_properties.status_of_secondary)
+ self.assertIsNotNone(storage.storage_service_properties.endpoints)
+ self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
+ self.assertIsNotNone(storage.extended_properties)
+ self.assertTrue(len(storage.extended_properties) > 0)
+
+ def test_get_storage_account_properties(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.get_storage_account_properties(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.service_name, self.storage_account_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNone(result.storage_service_keys)
+ self.assertIsNotNone(result.storage_service_properties)
+ self.assertIsNotNone(result.storage_service_properties.affinity_group)
+ self.assertIsNotNone(result.storage_service_properties.description)
+ self.assertIsNotNone(result.storage_service_properties.geo_primary_region)
+ self.assertIsNotNone(result.storage_service_properties.geo_replication_enabled)
+ self.assertIsNotNone(result.storage_service_properties.geo_secondary_region)
+ self.assertIsNotNone(result.storage_service_properties.label)
+ self.assertIsNotNone(result.storage_service_properties.last_geo_failover_time)
+ self.assertIsNotNone(result.storage_service_properties.location)
+ self.assertIsNotNone(result.storage_service_properties.status)
+ self.assertIsNotNone(result.storage_service_properties.status_of_primary)
+ self.assertIsNotNone(result.storage_service_properties.status_of_secondary)
+ self.assertIsNotNone(result.storage_service_properties.endpoints)
+ self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
+ self.assertIsNotNone(result.extended_properties)
+ self.assertTrue(len(result.extended_properties) > 0)
+ self.assertIsNotNone(result.capabilities)
+ self.assertTrue(len(result.capabilities) > 0)
+
+ def test_get_storage_account_keys(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.get_storage_account_keys(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.storage_service_keys.primary)
+ self.assertIsNotNone(result.storage_service_keys.secondary)
+ self.assertIsNone(result.storage_service_properties)
+
+ def test_regenerate_storage_account_keys(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+ previous = self.sms.get_storage_account_keys(self.storage_account_name)
+
+ # Act
+ result = self.sms.regenerate_storage_account_keys(self.storage_account_name, 'Secondary')
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.storage_service_keys.primary)
+ self.assertIsNotNone(result.storage_service_keys.secondary)
+ self.assertIsNone(result.storage_service_properties)
+ self.assertEqual(result.storage_service_keys.primary, previous.storage_service_keys.primary)
+ self.assertNotEqual(result.storage_service_keys.secondary, previous.storage_service_keys.secondary)
+
+ def test_create_storage_account(self):
+ # Arrange
+ description = self.storage_account_name + 'description'
+ label = self.storage_account_name + 'label'
+
+ # Act
+ result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._storage_account_exists(self.storage_account_name))
+
+ def test_update_storage_account(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+ description = self.storage_account_name + 'descriptionupdate'
+ label = self.storage_account_name + 'labelupdate'
+
+ # Act
+ result = self.sms.update_storage_account(self.storage_account_name, description, label, False, {'ext1':'val1update', 'ext2':53, 'ext3':'brandnew'})
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_storage_account_properties(self.storage_account_name)
+ self.assertEqual(props.storage_service_properties.description, description)
+ self.assertEqual(props.storage_service_properties.label, label)
+ self.assertEqual(props.extended_properties['ext1'], 'val1update')
+ self.assertEqual(props.extended_properties['ext2'], '53')
+ self.assertEqual(props.extended_properties['ext3'], 'brandnew')
+
+ def test_delete_storage_account(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.delete_storage_account(self.storage_account_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._storage_account_exists(self.storage_account_name))
+
+ def test_check_storage_account_name_availability_not_available(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.check_storage_account_name_availability(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertFalse(result.result)
+
+ def test_check_storage_account_name_availability_available(self):
+ # Arrange
+
+ # Act
+ result = self.sms.check_storage_account_name_availability(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(result.result)
+
+ #--Test cases for hosted services ------------------------------------
+ # CRUD coverage for hosted (cloud) services. List/get assert every
+ # property the Service Management API returns; embed_detail additionally
+ # pulls the deployment and role-instance sub-trees.
+ def test_list_hosted_services(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.list_hosted_services()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ # The subscription may contain other services; find ours by name.
+ service = None
+ for temp in result:
+ if temp.service_name == self.hosted_service_name:
+ service = temp
+ break
+
+ self.assertIsNotNone(service)
+ self.assertIsNotNone(service.service_name)
+ self.assertIsNotNone(service.url)
+ self.assertIsNotNone(service.hosted_service_properties)
+ self.assertIsNotNone(service.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(service.hosted_service_properties.date_created)
+ self.assertIsNotNone(service.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(service.hosted_service_properties.description)
+ self.assertIsNotNone(service.hosted_service_properties.label)
+ self.assertIsNotNone(service.hosted_service_properties.location)
+ self.assertIsNotNone(service.hosted_service_properties.status)
+ self.assertIsNotNone(service.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(service.hosted_service_properties.extended_properties['ext2'])
+ # Listing does not embed deployments — only get with embed_detail does.
+ self.assertIsNone(service.deployments)
+
+ def test_get_hosted_service_properties(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.get_hosted_service_properties(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.hosted_service_properties)
+ self.assertIsNotNone(result.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(result.hosted_service_properties.date_created)
+ self.assertIsNotNone(result.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(result.hosted_service_properties.description)
+ self.assertIsNotNone(result.hosted_service_properties.label)
+ self.assertIsNotNone(result.hosted_service_properties.location)
+ self.assertIsNotNone(result.hosted_service_properties.status)
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext2'])
+ self.assertIsNone(result.deployments)
+
+ def test_get_hosted_service_properties_with_embed_detail(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ # Second positional argument is embed_detail=True — includes deployments.
+ result = self.sms.get_hosted_service_properties(self.hosted_service_name, True)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.hosted_service_properties)
+ self.assertIsNotNone(result.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(result.hosted_service_properties.date_created)
+ self.assertIsNotNone(result.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(result.hosted_service_properties.description)
+ self.assertIsNotNone(result.hosted_service_properties.label)
+ self.assertIsNotNone(result.hosted_service_properties.location)
+ self.assertIsNotNone(result.hosted_service_properties.status)
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext2'])
+
+ # NOTE(review): the deployment fixture presumably creates a single
+ # 'WorkerRole1' worker role — confirm against the helper's package/config.
+ self.assertIsNotNone(result.deployments)
+ self.assertIsNotNone(result.deployments[0].configuration)
+ self.assertIsNotNone(result.deployments[0].created_time)
+ self.assertIsNotNone(result.deployments[0].deployment_slot)
+ self.assertIsNotNone(result.deployments[0].extended_properties['dep1'])
+ self.assertIsNotNone(result.deployments[0].extended_properties['dep2'])
+ self.assertIsNotNone(result.deployments[0].label)
+ self.assertIsNotNone(result.deployments[0].last_modified_time)
+ self.assertFalse(result.deployments[0].locked)
+ self.assertEqual(result.deployments[0].name, deployment_name)
+ self.assertIsNone(result.deployments[0].persistent_vm_downtime_info)
+ self.assertIsNotNone(result.deployments[0].private_id)
+ self.assertIsNotNone(result.deployments[0].role_list[0].os_version)
+ self.assertEqual(result.deployments[0].role_list[0].role_name, 'WorkerRole1')
+ self.assertFalse(result.deployments[0].rollback_allowed)
+ self.assertIsNotNone(result.deployments[0].sdk_version)
+ self.assertIsNotNone(result.deployments[0].status)
+ self.assertIsNotNone(result.deployments[0].upgrade_domain_count)
+ self.assertIsNone(result.deployments[0].upgrade_status)
+ self.assertIsNotNone(result.deployments[0].url)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].fqdn)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_error_code)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_fault_domain)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_name)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_size)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_state_details)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_status)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_upgrade_domain)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].ip_address)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].power_state)
+ self.assertEqual(result.deployments[0].role_instance_list[0].role_name, 'WorkerRole1')
+
+ def test_create_hosted_service(self):
+ # Arrange
+ label = 'pythonlabel'
+ description = 'python hosted service description'
+ location = 'West US'
+
+ # Act
+ # Fifth argument (affinity_group) is None since an explicit location is given.
+ result = self.sms.create_hosted_service(self.hosted_service_name, label, description, location, None, {'ext1':'val1','ext2':'val2'})
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._hosted_service_exists(self.hosted_service_name))
+
+ def test_update_hosted_service(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ label = 'ptvslabelupdate'
+ description = 'ptvs description update'
+
+ # Act
+ # Updates existing extended properties and adds a brand-new one ('ext3').
+ result = self.sms.update_hosted_service(self.hosted_service_name, label, description, {'ext1':'val1update','ext2':'val2update','ext3':'brandnew'})
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_hosted_service_properties(self.hosted_service_name)
+ self.assertEqual(props.hosted_service_properties.label, label)
+ self.assertEqual(props.hosted_service_properties.description, description)
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext1'], 'val1update')
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext2'], 'val2update')
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext3'], 'brandnew')
+
+ def test_delete_hosted_service(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.delete_hosted_service(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._hosted_service_exists(self.hosted_service_name))
+
+ # Deployment tests: lookup by slot/name, create/delete, slot swap,
+ # configuration change, status update, upgrade, domain walk, rollback.
+ # Long-running operations return an async request id that is polled via
+ # self._wait_for_async.
+ def test_get_deployment_by_slot(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Production')
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, deployment_name)
+ self.assertEqual(result.deployment_slot, 'Production')
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.configuration)
+
+ def test_get_deployment_by_name(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, deployment_name)
+ self.assertEqual(result.deployment_slot, 'Production')
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.configuration)
+
+ def test_create_deployment(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ # The API expects the .cscfg configuration base64-encoded.
+ configuration = base64.b64encode(DEPLOYMENT_ORIGINAL_CONFIG)
+ package_url = self._upload_default_package_to_storage_blob('WindowsAzure1Blob')
+
+ # Act
+ # NOTE(review): slot is lowercase 'production' here but 'Production'
+ # everywhere else in this file — presumably the service treats the slot
+ # name case-insensitively; confirm and normalize for consistency.
+ result = self.sms.create_deployment(self.hosted_service_name, 'production', 'WindowsAzure1', package_url, 'deploylabel', configuration)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._deployment_exists(self.hosted_service_name, 'WindowsAzure1'))
+
+ def test_delete_deployment(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.delete_deployment(self.hosted_service_name, deployment_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._deployment_exists(self.hosted_service_name, deployment_name))
+
+ def test_swap_deployment(self):
+ # Arrange
+ production_deployment_name = 'utdeployprod'
+ staging_deployment_name = 'utdeploystag'
+ self._create_hosted_service(self.hosted_service_name)
+ self._add_deployment(self.hosted_service_name, production_deployment_name, 'Production')
+ self._add_deployment(self.hosted_service_name, staging_deployment_name, 'Staging')
+
+ # Act
+ result = self.sms.swap_deployment(self.hosted_service_name, production_deployment_name, staging_deployment_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ # After the VIP swap each deployment should occupy the other slot.
+ deploy = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Production')
+ self.assertIsNotNone(deploy)
+ self.assertEqual(deploy.name, staging_deployment_name)
+ self.assertEqual(deploy.deployment_slot, 'Production')
+
+ deploy = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Staging')
+ self.assertIsNotNone(deploy)
+ self.assertEqual(deploy.name, production_deployment_name)
+ self.assertEqual(deploy.deployment_slot, 'Staging')
+
+ def test_change_deployment_configuration(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ # Act
+ result = self.sms.change_deployment_configuration(self.hosted_service_name, deployment_name, configuration)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ # The update config sets the instance count to 4; verify it round-trips.
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_update_deployment_status(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Suspended')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.status, 'Suspended')
+
+ def test_upgrade_deployment(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ # Act
+ # 'Auto' mode lets the platform walk upgrade domains automatically.
+ result = self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Auto', package_url, configuration, 'upgraded', True)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.label, 'upgraded')
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_walk_upgrade_domain(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+ # 'Manual' mode requires walking each upgrade domain explicitly.
+ result = self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Manual', package_url, configuration, 'upgraded', True)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.walk_upgrade_domain(self.hosted_service_name, deployment_name, 0)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.label, 'upgraded')
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_rollback_update_or_upgrade(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated207')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ # Rollback is only allowed while an upgrade is in flight/pending.
+ self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Auto', package_url, configuration, 'upgraded', True)
+ self._wait_for_rollback_allowed(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.rollback_update_or_upgrade(self.hosted_service_name, deployment_name, 'Auto', True)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ # Original configuration had an instance count of 2.
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertTrue(props.configuration.find('Instances count="2"') >= 0)
+
+ # Role-instance operations (reboot/reimage) plus hosted-service name
+ # availability. Both operations require the deployment to be Running and
+ # the target instance in ReadyRole first.
+ def test_reboot_role_instance(self):
+ # Arrange
+ role_instance_name = 'WorkerRole1_IN_0'
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_role_instance_status(self.hosted_service_name, deployment_name, role_instance_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.reboot_role_instance(self.hosted_service_name, deployment_name, role_instance_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ # Depending on timing the instance is either still stopped or already
+ # back up, so either status is acceptable.
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ status = self._get_role_instance_status(props, role_instance_name)
+ self.assertTrue(status == 'StoppedVM' or status =='ReadyRole')
+
+ def test_reimage_role_instance(self):
+ # Arrange
+ role_instance_name = 'WorkerRole1_IN_0'
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_role_instance_status(self.hosted_service_name, deployment_name, role_instance_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.reimage_role_instance(self.hosted_service_name, deployment_name, role_instance_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ status = self._get_role_instance_status(props, role_instance_name)
+ self.assertTrue(status == 'StoppedVM' or status =='ReadyRole')
+
+ def test_check_hosted_service_name_availability_not_available(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.check_hosted_service_name_availability(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertFalse(result.result)
+
+ def test_check_hosted_service_name_availability_available(self):
+ # Arrange
+ # (nothing to arrange — the service is intentionally not created)
+
+ # Act
+ result = self.sms.check_hosted_service_name_availability(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(result.result)
+
+ #--Test cases for service certificates -------------------------------
+ # Certificates are identified by (thumbprint-algorithm, thumbprint);
+ # fixtures come from the SERVICE_CERT_* module constants.
+ def test_list_service_certificates(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.list_service_certificates(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ # Locate our certificate by the service-name segment in its URL.
+ url_part = '/' + self.hosted_service_name + '/'
+ cert = None
+ for temp in result:
+ if url_part in temp.certificate_url:
+ cert = temp
+ break
+
+ self.assertIsNotNone(cert)
+ self.assertIsNotNone(cert.certificate_url)
+ self.assertEqual(cert.thumbprint, SERVICE_CERT_THUMBPRINT)
+ self.assertEqual(cert.thumbprint_algorithm, SERVICE_CERT_THUMBALGO)
+ self.assertEqual(cert.data, SERVICE_CERT_DATA_PUBLIC)
+
+ def test_get_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.get_service_certificate(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT)
+
+ # Assert
+ # NOTE(review): the get operation apparently returns only the public
+ # data, leaving url/thumbprint fields empty — confirm this is the
+ # service's documented response shape rather than a parsing gap.
+ self.assertIsNotNone(result)
+ self.assertEqual(result.certificate_url, '')
+ self.assertEqual(result.thumbprint, '')
+ self.assertEqual(result.thumbprint_algorithm, '')
+ self.assertEqual(result.data, SERVICE_CERT_DATA_PUBLIC)
+
+ def test_add_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.add_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._service_certificate_exists(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT))
+
+ def test_delete_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.delete_service_certificate(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._service_certificate_exists(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT))
+
+ #--Test cases for management certificates ----------------------------
+ # Subscription-level certificates, keyed by thumbprint
+ # (self.management_certificate_name holds the thumbprint).
+ def test_list_management_certificates(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.list_management_certificates()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ cert = None
+ for temp in result:
+ if temp.subscription_certificate_thumbprint == self.management_certificate_name:
+ cert = temp
+ break
+
+ self.assertIsNotNone(cert)
+ self.assertIsNotNone(cert.created)
+ self.assertEqual(cert.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
+ self.assertEqual(cert.subscription_certificate_data, MANAGEMENT_CERT_DATA)
+ self.assertEqual(cert.subscription_certificate_thumbprint, self.management_certificate_name)
+
+ def test_get_management_certificate(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.get_management_certificate(self.management_certificate_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.created)
+ self.assertEqual(result.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
+ self.assertEqual(result.subscription_certificate_data, MANAGEMENT_CERT_DATA)
+ self.assertEqual(result.subscription_certificate_thumbprint, self.management_certificate_name)
+
+ def test_add_management_certificate(self):
+ # Arrange
+ public_key = MANAGEMENT_CERT_PUBLICKEY
+ data = MANAGEMENT_CERT_DATA
+
+ # Act
+ result = self.sms.add_management_certificate(public_key, self.management_certificate_name, data)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._management_certificate_exists(self.management_certificate_name))
+
+ def test_delete_management_certificate(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.delete_management_certificate(self.management_certificate_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._management_certificate_exists(self.management_certificate_name))
+
+ #--Test cases for affinity groups ------------------------------------
+ # CRUD for affinity groups; the properties test also attaches a hosted
+ # service and a storage account to verify they appear in the response.
+ def test_list_affinity_groups(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+
+ # Act
+ result = self.sms.list_affinity_groups()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ group = None
+ for temp in result:
+ if temp.name == self.affinity_group_name:
+ group = temp
+ break
+
+ self.assertIsNotNone(group)
+ self.assertIsNotNone(group.name)
+ self.assertIsNotNone(group.label)
+ self.assertIsNotNone(group.description)
+ self.assertIsNotNone(group.location)
+ self.assertIsNotNone(group.capabilities)
+ self.assertTrue(len(group.capabilities) > 0)
+
+ def test_get_affinity_group_properties(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+ # location=None because the affinity group supplies placement.
+ self.sms.create_hosted_service(self.hosted_service_name, 'affgrptestlabel', 'affgrptestdesc', None, self.affinity_group_name)
+ self.sms.create_storage_account(self.storage_account_name, self.storage_account_name + 'desc', self.storage_account_name + 'label', self.affinity_group_name)
+
+ # Act
+ result = self.sms.get_affinity_group_properties(self.affinity_group_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, self.affinity_group_name)
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.description)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.hosted_services[0])
+ self.assertEqual(result.hosted_services[0].service_name, self.hosted_service_name)
+ self.assertEqual(result.hosted_services[0].hosted_service_properties.affinity_group, self.affinity_group_name)
+ # not sure why azure does not return any storage service
+ self.assertTrue(len(result.capabilities) > 0)
+
+ def test_create_affinity_group(self):
+ # Arrange
+ label = 'tstmgmtaffgrp'
+ description = 'tstmgmt affinity group'
+
+ # Act
+ result = self.sms.create_affinity_group(self.affinity_group_name, label, 'West US', description)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._affinity_group_exists(self.affinity_group_name))
+
+ def test_update_affinity_group(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+ label = 'tstlabelupdate'
+ description = 'testmgmt affinity group update'
+
+ # Act
+ result = self.sms.update_affinity_group(self.affinity_group_name, label, description)
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_affinity_group_properties(self.affinity_group_name)
+ self.assertEqual(props.label, label)
+ self.assertEqual(props.description, description)
+
+ def test_delete_affinity_group(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+
+ # Act
+ result = self.sms.delete_affinity_group(self.affinity_group_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._affinity_group_exists(self.affinity_group_name))
+
+ #--Test cases for locations ------------------------------------------
+ def test_list_locations(self):
+ # Arrange
+ # (read-only query — nothing to arrange)
+
+ # Act
+ result = self.sms.list_locations()
+
+ # Assert
+ # Only shape is asserted, not specific region names, so the test stays
+ # stable as regions are added or renamed.
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+ self.assertIsNotNone(result[0].name)
+ self.assertIsNotNone(result[0].display_name)
+ self.assertIsNotNone(result[0].available_services)
+ self.assertTrue(len(result[0].available_services) > 0)
+
+ #--Test cases for retrieving operating system information ------------
+ def test_list_operating_systems(self):
+ # Arrange
+ # (read-only query — nothing to arrange)
+
+ # Act
+ result = self.sms.list_operating_systems()
+
+ # Assert
+ # NOTE(review): '> 20' is a magic threshold presumably reflecting the
+ # OS catalog size at the time of writing — fragile if the list shrinks.
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 20)
+ self.assertIsNotNone(result[0].family)
+ self.assertIsNotNone(result[0].family_label)
+ self.assertIsNotNone(result[0].is_active)
+ self.assertIsNotNone(result[0].is_default)
+ self.assertIsNotNone(result[0].label)
+ self.assertIsNotNone(result[0].version)
+
+ def test_list_operating_system_families(self):
+ # Arrange
+ # (read-only query — nothing to arrange)
+
+ # Act
+ result = self.sms.list_operating_system_families()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+ self.assertIsNotNone(result[0].name)
+ self.assertIsNotNone(result[0].label)
+ self.assertTrue(len(result[0].operating_systems) > 0)
+ self.assertIsNotNone(result[0].operating_systems[0].version)
+ self.assertIsNotNone(result[0].operating_systems[0].label)
+ self.assertIsNotNone(result[0].operating_systems[0].is_default)
+ self.assertIsNotNone(result[0].operating_systems[0].is_active)
+
+ #--Test cases for retrieving subscription history --------------------
+ def test_get_subscription(self):
+ # Arrange
+ # (read-only query — nothing to arrange)
+
+ # Act
+ result = self.sms.get_subscription()
+
+ # Assert
+ # Current-usage counters may legitimately be zero; quota maxima must be
+ # strictly positive.
+ self.assertIsNotNone(result)
+ self.assertEqual(result.subscription_id, credentials.getSubscriptionId())
+ self.assertIsNotNone(result.account_admin_live_email_id)
+ self.assertIsNotNone(result.service_admin_live_email_id)
+ self.assertIsNotNone(result.subscription_name)
+ self.assertIsNotNone(result.subscription_status)
+ self.assertTrue(result.current_core_count >= 0)
+ self.assertTrue(result.current_hosted_services >= 0)
+ self.assertTrue(result.current_storage_accounts >= 0)
+ self.assertTrue(result.max_core_count > 0)
+ self.assertTrue(result.max_dns_servers > 0)
+ self.assertTrue(result.max_hosted_services > 0)
+ self.assertTrue(result.max_local_network_sites > 0)
+ self.assertTrue(result.max_storage_accounts > 0)
+ self.assertTrue(result.max_virtual_network_sites > 0)
+
+ #--Test cases for virtual machines -----------------------------------
+ # IaaS VM role tests. The same hosted-service name is reused as the
+ # deployment and role name for brevity. Passwords like 'u7;9jbp!' are
+ # throwaway test credentials, not secrets.
+ def test_get_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, self.container_name, role_name + '.vhd')
+
+ # Act
+ result = self.sms.get_role(service_name, deployment_name, role_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.role_name, role_name)
+ self.assertIsNotNone(result.role_size)
+ self.assertIsNotNone(result.role_type)
+ self.assertIsNotNone(result.os_virtual_hard_disk)
+ self.assertIsNotNone(result.os_virtual_hard_disk.disk_label)
+ self.assertIsNotNone(result.os_virtual_hard_disk.disk_name)
+ self.assertIsNotNone(result.os_virtual_hard_disk.host_caching)
+ self.assertIsNotNone(result.os_virtual_hard_disk.media_link)
+ self.assertIsNotNone(result.os_virtual_hard_disk.os)
+ self.assertIsNotNone(result.os_virtual_hard_disk.source_image_name)
+ self.assertIsNotNone(result.data_virtual_hard_disks)
+ self.assertIsNotNone(result.configuration_sets)
+ self.assertIsNotNone(result.configuration_sets[0])
+ self.assertIsNotNone(result.configuration_sets[0].configuration_set_type)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].protocol)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].port)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].name)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].local_port)
+
+ def test_create_virtual_machine_deployment_linux(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name + '.vhd'
+
+ self._create_hosted_service(service_name)
+
+ # Act
+ # ssh=None: password auth only, no SSH key configuration.
+ system = LinuxConfigurationSet('unittest', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endpnameL', 'tcp', '59913', '3394'))
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name))
+
+ def test_create_virtual_machine_deployment_windows(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name + '.vhd'
+
+ self._create_hosted_service(service_name)
+ # The cert referenced by the stored-certificate settings below must
+ # already exist on the service, so upload it as a .pfx first.
+ self._create_service_certificate(service_name, SERVICE_CERT_DATA, 'pfx', SERVICE_CERT_PASSWORD)
+
+ # Act
+ system = WindowsConfigurationSet('unittest', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endpnameW', 'tcp', '59917', '3395'))
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name))
+
+ def test_add_role_linux(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_linux(service_name, deployment_name, role_name1, self.container_name, role_name1 + '.vhd')
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name2 + '.vhd'
+
+ # Act
+ # Second role in the same deployment needs a distinct computer name.
+ system = LinuxConfigurationSet('computer2', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_add_role_windows(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_windows(service_name, deployment_name, role_name1, self.container_name, role_name1 + '.vhd')
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name2 + '.vhd'
+
+ # Act
+ system = WindowsConfigurationSet('computer2', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ # NOTE(review): assumes _create_vm_windows uploaded the service
+ # certificate for this thumbprint — confirm against that helper.
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_update_role(self):
+ # Arrange (no '# Arrange' marker in the original — this section is it)
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endupdate', 'tcp', '50055', '5555'))
+
+ # Act
+ # Resize Small -> Medium and replace the endpoint configuration.
+ result = self.sms.update_role(service_name, deployment_name, role_name, network_config=network, role_size='Medium')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ role = self.sms.get_role(service_name, deployment_name, role_name)
+ self.assertEqual(role.role_size, 'Medium')
+
+ def test_delete_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_windows(service_name, deployment_name, role_name1, 'vhds', role_name1)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ self._add_role_windows(service_name, deployment_name, role_name2)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name2, 'ReadyRole')
+
+ # Act
+ result = self.sms.delete_role(service_name, deployment_name, role_name2)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ # Only the second role is deleted; the first must survive.
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertFalse(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_shutdown_start_and_restart_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.shutdown_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'StoppedVM')
+
+ # Act
+ result = self.sms.start_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.restart_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ def test_capture_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ image_name = self.os_image_name
+ image_label = role_name + 'captured'
+
+ # Act
+ result = self.sms.capture_role(service_name, deployment_name, role_name, 'Delete', image_name, image_label)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._os_image_exists(self.os_image_name))
+
+ #--Test cases for virtual machine images -----------------------------
+ def test_list_os_images(self):
+ # Arrange
+ media_url = LINUX_OS_VHD_URL
+ os = 'Linux'
+ self._create_os_image(self.os_image_name, media_url, os)
+
+ # Act
+ result = self.sms.list_os_images()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ image = None
+ for temp in result:
+ if temp.name == self.os_image_name:
+ image = temp
+ break
+
+ self.assertIsNotNone(image)
+ self.assertIsNotNone(image.affinity_group)
+ self.assertIsNotNone(image.category)
+ self.assertIsNotNone(image.description)
+ self.assertIsNotNone(image.eula)
+ self.assertIsNotNone(image.label)
+ self.assertIsNotNone(image.location)
+ self.assertIsNotNone(image.logical_size_in_gb)
+ self.assertEqual(image.media_link, media_url)
+ self.assertEqual(image.name, self.os_image_name)
+ self.assertEqual(image.os, os)
+
+ def test_get_os_image(self):
+ # Arrange
+ media_url = LINUX_OS_VHD_URL
+ os = 'Linux'
+ self._create_os_image(self.os_image_name, media_url, os)
+
+ # Act
+ result = self.sms.get_os_image(self.os_image_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.affinity_group)
+ self.assertIsNotNone(result.category)
+ self.assertIsNotNone(result.description)
+ self.assertIsNotNone(result.eula)
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_size_in_gb)
+ self.assertEqual(result.media_link, media_url)
+ self.assertEqual(result.name, self.os_image_name)
+ self.assertEqual(result.os, os)
+
+ def test_add_os_image(self):
+ # Arrange
+
+ # Act
+ result = self.sms.add_os_image('utcentosimg', LINUX_OS_VHD_URL, self.os_image_name, 'Linux')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._os_image_exists(self.os_image_name))
+
+ def test_update_os_image(self):
+ # Arrange
+ self._create_os_image(self.os_image_name, LINUX_OS_VHD_URL, 'Linux')
+
+ # Act
+ result = self.sms.update_os_image(self.os_image_name, 'newlabel', LINUX_OS_VHD_URL, self.os_image_name, 'Linux')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ image = self.sms.get_os_image(self.os_image_name)
+ self.assertEqual(image.label, 'newlabel')
+ self.assertEqual(image.os, 'Linux')
+
+ def test_delete_os_image(self):
+ # Arrange
+ self._create_os_image(self.os_image_name, LINUX_OS_VHD_URL, 'Linux')
+
+ # Act
+ result = self.sms.delete_os_image(self.os_image_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._os_image_exists(self.os_image_name))
+
+    #--Test cases for virtual machine data disks -------------------------
+ def test_get_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 1
+ self._add_data_disk_from_blob_url(service_name, deployment_name, role_name, lun, 'mylabel')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.get_data_disk(service_name, deployment_name, role_name, lun)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.disk_label, 'mylabel')
+ self.assertIsNotNone(result.disk_name)
+ self.assertIsNotNone(result.host_caching)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertEqual(result.lun, lun)
+ self.assertIsNotNone(result.media_link)
+
+ def test_add_data_disk_from_disk_name(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 2
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ def test_add_data_disk_from_blob_url(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 3
+ label = 'disk' + str(lun)
+ url = self._upload_disk_to_storage_blob('disk')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, label, None, None, url)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ def test_update_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 1
+ updated_lun = 10
+ self._add_data_disk_from_blob_url(service_name, deployment_name, role_name, lun, 'mylabel')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.update_data_disk(service_name, deployment_name, role_name, lun, None, None, updated_lun)
+ self._wait_for_async(result.request_id)
+ self.data_disk_info = (service_name, deployment_name, role_name, updated_lun)
+
+ # Assert
+ self.assertFalse(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, updated_lun))
+
+ def test_delete_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 5
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.delete_data_disk(service_name, deployment_name, role_name, lun)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ #--Test cases for virtual machine disks ------------------------------
+ def test_list_disks(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.list_disks()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ disk = None
+ for temp in result:
+ if temp.name == self.disk_name:
+ disk = temp
+ break
+
+ self.assertIsNotNone(disk)
+ self.assertIsNotNone(disk.os)
+ self.assertIsNotNone(disk.location)
+ self.assertIsNotNone(disk.logical_disk_size_in_gb)
+ self.assertIsNotNone(disk.media_link)
+ self.assertIsNotNone(disk.name)
+ self.assertIsNotNone(disk.source_image_name)
+
+ def test_get_disk_unattached(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.get_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.os)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertEqual(result.media_link, url)
+ self.assertEqual(result.name, self.disk_name)
+ self.assertIsNotNone(result.source_image_name)
+ self.assertIsNone(result.attached_to)
+
+ def test_get_disk_attached(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 6
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.get_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.os)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertIsNotNone(result.media_link)
+ self.assertIsNotNone(result.name)
+ self.assertIsNotNone(result.source_image_name)
+ self.assertIsNotNone(result.attached_to)
+ self.assertEqual(result.attached_to.deployment_name, deployment_name)
+ self.assertEqual(result.attached_to.hosted_service_name, service_name)
+ self.assertEqual(result.attached_to.role_name, role_name)
+
+ def test_add_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+
+ # Act
+ result = self.sms.add_disk(False, 'ptvslabel', url, self.disk_name, 'Windows')
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._disk_exists(self.disk_name))
+
+ def test_update_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ urlupdate = self._upload_disk_to_storage_blob('diskupdate')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.update_disk(self.disk_name, False, 'ptvslabelupdate', urlupdate, self.disk_name, 'Windows')
+
+ # Assert
+ self.assertIsNone(result)
+ disk = self.sms.get_disk(self.disk_name)
+ self.assertEqual(disk.name, self.disk_name)
+ self.assertEqual(disk.label, 'ptvslabelupdate')
+ self.assertEqual(disk.media_link, url)
+
+ def test_delete_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.delete_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._disk_exists(self.disk_name))
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/azuretest/test_sharedaccesssignature.py b/test/azuretest/test_sharedaccesssignature.py
index c602d374bf3e..76c40cf7e900 100644
--- a/test/azuretest/test_sharedaccesssignature.py
+++ b/test/azuretest/test_sharedaccesssignature.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/test_tableservice.py b/test/azuretest/test_tableservice.py
index 3c29c15e2c06..49be76c057ff 100644
--- a/test/azuretest/test_tableservice.py
+++ b/test/azuretest/test_tableservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,6 +40,11 @@ def setUp(self):
self.tc = TableService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.tc.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
table_base_name = u'testtable%s' % (__uid)
self.table_name = getUniqueNameBasedOnCurrentTime(table_base_name)
diff --git a/test/azuretest/util.py b/test/azuretest/util.py
index 6ea3461d7c0d..bd8d52baa001 100644
--- a/test/azuretest/util.py
+++ b/test/azuretest/util.py
@@ -1,15 +1,17 @@
-#------------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation.
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
-# This source code is subject to terms and conditions of the Apache License,
-# Version 2.0. A copy of the license can be found in the License.html file at
-# the root of this distribution. If you cannot locate the Apache License,
-# Version 2.0, please send an email to vspython@microsoft.com. By using this
-# source code in any fashion, you are agreeing to be bound by the terms of the
-# Apache License, Version 2.0.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# You must not remove this notice, or any other, from this software.
-#------------------------------------------------------------------------------
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
import json
import os
@@ -49,6 +51,12 @@ def __init__(self):
with open(tmpName, "r") as f:
self.ns = json.load(f)
+ def getManagementCertFile(self):
+ return self.ns[u'managementcertfile']
+
+ def getSubscriptionId(self):
+ return self.ns[u'subscriptionid']
+
def getServiceBusKey(self):
return self.ns[u'servicebuskey']
@@ -61,6 +69,15 @@ def getStorageServicesKey(self):
def getStorageServicesName(self):
return self.ns[u'storageservicesname']
+ def getLinuxOSVHD(self):
+ return self.ns[u'linuxosvhd']
+
+ def getProxyHost(self):
+ return self.ns[u'proxyhost']
+
+ def getProxyPort(self):
+ return self.ns[u'proxyport']
+
credentials = Credentials()
def getUniqueTestRunID():
diff --git a/test/run.bat b/test/run.bat
index 1586880606e5..b610556263da 100644
--- a/test/run.bat
+++ b/test/run.bat
@@ -1,16 +1,18 @@
@echo OFF
SETLOCAL
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls